code stringlengths 17 6.64M |
|---|
def main(argv=None):
    """Train a VGG16-based FCN for semantic segmentation.

    Relies on module-level configuration globals (model_path, NUM_CLASSES,
    Train_Image_Dir, Train_Label_Dir, Batch_Size, logs_dir, learning_rate,
    MAX_ITERATION, UseValidationSet, ...) defined elsewhere in this file.
    """
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
    # NHWC float image batch; labels are per-pixel integer class ids.
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name='GTLabel')
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
    Net.build(image, NUM_CLASSES, keep_prob)
    # NOTE(review): `Net.Prob` is passed as `logits`; this op expects
    # *unnormalized* logits, so if Prob is already softmaxed the loss is
    # wrong -- confirm against BuildNetVgg16.
    Loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.squeeze(GTLabel, squeeze_dims=[3]),
            logits=Net.Prob,
            name='Loss'))
    trainable_var = tf.trainable_variables()
    train_op = train(Loss, trainable_var)
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, GTLabelDir=Train_Label_Dir, BatchSize=Batch_Size)
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, GTLabelDir=Valid_Labels_Dir, BatchSize=Batch_Size)
    sess = tf.Session()
    print('Setting up Saver...')
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored...')
    # Start fresh loss logs with a header line.
    with open(TrainLossTxtFile, 'w') as f:
        f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    if UseValidationSet:
        with open(ValidLossTxtFile, 'w') as f:
            f.write('Iteration\tloss\t Learning Rate=' + str(learning_rate))
    for itr in range(MAX_ITERATION):
        Images, GTLabels = TrainReader.ReadAndAugmentNextBatch()
        feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)
        if (itr % 500) == 0 and itr > 0:
            print('Saving Model to file in ' + logs_dir)
            saver.save(sess, logs_dir + 'model.ckpt', itr)
        if (itr % 10) == 0:
            # Re-evaluate the loss on the same batch with dropout disabled.
            feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print('Step ' + str(itr) + ' Train Loss=' + str(TLoss))
            with open(TrainLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(TLoss))
        if UseValidationSet and (itr % 2000) == 0:
            SumLoss = np.float64(0.0)
            # int() instead of the removed np.int alias.
            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print('Calculating Validation on ' + str(ValidReader.NumFiles) + ' Images')
            for i in range(NBatches):
                Images, GTLabels = ValidReader.ReadNextBatchClean()
                feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1.0}
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            # Fix: the original also incremented NBatches inside the loop,
            # so the reported average divided by TWICE the batch count.
            SumLoss /= NBatches
            print('Validation Loss: ' + str(SumLoss))
            with open(ValidLossTxtFile, 'a') as f:
                f.write('\n' + str(itr) + '\t' + str(SumLoss))
|
def get_model_data(dir_path, model_url):
    """Download (if necessary) and load the VGG model .mat file.

    Raises:
        IOError: if the file is still missing after the download attempt.
    """
    maybe_download_and_extract(dir_path, model_url)
    model_file = model_url.split('/')[-1]
    model_path = os.path.join(dir_path, model_file)
    if not os.path.exists(model_path):
        raise IOError('VGG Model not found!')
    return scipy.io.loadmat(model_path)
|
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download `url_name` into `dir_path` if not already present, with a
    progress meter, and optionally extract the archive.

    Args:
        dir_path: destination directory (created if missing).
        url_name: URL; its last path component is used as the local filename.
        is_tarfile: extract as gzipped tar after download.
        is_zipfile: extract as zip after download.
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Carriage-return progress line, overwritten in place.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, (float(count * block_size) / float(total_size)) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
        if is_tarfile:
            # Fix: use a context manager; the original leaked the open
            # tarfile handle.
            with tarfile.open(filepath, 'r:gz') as tar:
                tar.extractall(dir_path)
        elif is_zipfile:
            # Fix: dropped the unused `zip_dir = zf.namelist()[0]` local.
            with zipfile.ZipFile(filepath) as zf:
                zf.extractall(dir_path)
|
def save_image(image, save_dir, name, mean=None):
    """Save `image` to <save_dir>/<name>.png, un-normalizing first when a
    mean is given.

    Fix: test `mean is not None` -- the original `if mean:` raises
    "truth value of an array is ambiguous" for numpy-array means and
    silently skips a legitimate mean of 0.
    """
    if mean is not None:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + '.png'), image)
|
def get_variable(weights, name):
    """Create a TF variable of `weights.shape` initialized from `weights`."""
    initializer = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=initializer, shape=weights.shape)
|
def weight_variable(shape, stddev=0.02, name=None):
    """Truncated-normal weight tensor; named variables go through get_variable."""
    initial = tf.truncated_normal(shape, stddev=stddev)
    if name is not None:
        return tf.get_variable(name, initializer=initial)
    return tf.Variable(initial)
|
def bias_variable(shape, name=None):
    """Zero-initialized bias tensor; named variables go through get_variable."""
    initial = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=initial)
    return tf.Variable(initial)
|
def get_tensor_size(tensor):
    """Return the element count implied by `tensor`'s static shape.

    Fix: `reduce` is not a builtin in Python 3; import it from functools.
    Unknown (None-valued) dimensions will raise when multiplied.
    """
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
|
def conv2d_basic(x, W, bias):
    """Stride-1 SAME-padded 2D convolution followed by bias add."""
    out = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.bias_add(out, bias)
|
def conv2d_strided(x, W, b):
    """Stride-2 SAME-padded 2D convolution followed by bias add."""
    out = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='SAME')
    return tf.nn.bias_add(out, b)
|
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transposed (fractionally strided) 2D convolution plus bias.

    When `output_shape` is omitted, it defaults to the input's static
    shape with spatial dims doubled and channels taken from W's 3rd axis.
    """
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    deconv = tf.nn.conv2d_transpose(x, W, output_shape,
                                    strides=[1, stride, stride, 1],
                                    padding='SAME')
    return tf.nn.bias_add(deconv, b)
|
def leaky_relu(x, alpha=0.0, name=''):
    """Leaky ReLU: elementwise max(alpha * x, x). alpha=0 is plain ReLU."""
    scaled = alpha * x
    return tf.maximum(scaled, x, name)
|
def max_pool_2x2(x):
    """2x2 max pooling with stride 2, SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
|
def avg_pool_2x2(x):
    """2x2 average pooling with stride 2, SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding='SAME')
|
def local_response_norm(x):
    """Local response normalization with fixed AlexNet-style parameters."""
    return tf.nn.lrn(x, depth_radius=5, bias=2, alpha=0.0001, beta=0.75)
|
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-05):
    """Batch normalization that tracks an exponential moving average of the
    batch statistics for use at inference time.

    Code taken from http://stackoverflow.com/a/34634291/2267819

    Args:
        x: 4D NHWC tensor to normalize.
        n_out: number of channels (length of beta/gamma).
        phase_train: boolean scalar tensor. True: use batch statistics and
            update the EMA; False: use the EMA statistics.
        scope: variable scope name.
        decay: EMA decay rate.
        eps: numerical-stability epsilon.

    Returns:
        The normalized tensor.
    """
    with tf.variable_scope(scope):
        # Learned shift (beta) and scale (gamma), one per channel.
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0), trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02), trainable=True)
        # Per-channel moments over batch, height, and width.
        (batch_mean, batch_var) = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        def mean_var_with_update():
            # Apply the EMA update, then return the *batch* statistics;
            # the control dependency guarantees the update runs.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return (tf.identity(batch_mean), tf.identity(batch_var))
        # Training branch updates EMA; inference branch reads stored averages.
        (mean, var) = tf.cond(phase_train, mean_var_with_update, (lambda : (ema.average(batch_mean), ema.average(batch_var))))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
|
def process_image(image, mean_pixel):
    """Normalize an image by subtracting the dataset mean pixel."""
    return image - mean_pixel
|
def unprocess_image(image, mean_pixel):
    """Invert process_image by adding the dataset mean pixel back."""
    return image + mean_pixel
|
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """Residual bottleneck block (1x1 -> 3x3 -> 1x1) with optional stride-2
    first convolution (regular or transposed).

    Modified implementation from github ry?!

    Fixes vs. the original:
      * `conv_transpose` was defined with parameter `out_channel` but every
        call site uses `out_chans=` (TypeError).
      * both helpers convolved the *outer* `x` instead of their `tensor`
        argument, so branch2b/2c ignored their inputs.
      * `shape[-1] = out_channel` indexed an int; the intent was to set the
        channel axis of `out_shape`.
      * `bn` is called with (tensor, bn_name, scale_name) but was defined
        with only two parameters (TypeError).

    Args:
        x: 4D NHWC input tensor.
        out_chan1: channels of the reduced inner convolutions.
        out_chan2: channels of the block output.
        down_stride / up_stride: use stride 2 in the first conv of each branch.
        name: suffix for variable scopes.
    """
    def conv_transpose(tensor, out_chans, shape, strides, name=None):
        # Transposed conv whose output keeps the input's spatial shape
        # fields with the channel axis replaced by `out_chans`.
        out_shape = tensor.get_shape().as_list()
        in_channel = out_shape[-1]
        kernel = weight_variable([shape, shape, out_chans, in_channel], name=name)
        out_shape[-1] = out_chans
        return tf.nn.conv2d_transpose(tensor, kernel, output_shape=out_shape,
                                      strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(tensor, kernel, strides=[1, strides, strides, 1],
                            padding='SAME', name='conv')

    def bn(tensor, name=None, scale_name=None):
        # Local-response-norm stand-in for batch norm; `scale_name` is
        # accepted for call-site compatibility but unused (LRN has no
        # learned scale).
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=0.0001, beta=0.75, name=name)

    in_chans = x.get_shape().as_list()[3]
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1
    with tf.variable_scope(('res%s' % name)):
        if in_chans == out_chan2:
            # Identity shortcut when channel counts already match.
            b1 = x
        else:
            with tf.variable_scope('branch1'):
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride,
                                        name=('res%s_branch1' % name))
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride,
                              name=('res%s_branch1' % name))
                b1 = bn(b1, ('bn%s_branch1' % name), ('scale%s_branch1' % name))
        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride,
                                    name=('res%s_branch2a' % name))
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride,
                          name=('res%s_branch2a' % name))
            b2 = bn(b2, ('bn%s_branch2a' % name), ('scale%s_branch2a' % name))
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name=('res%s_branch2b' % name))
            b2 = bn(b2, ('bn%s_branch2b' % name), ('scale%s_branch2b' % name))
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name=('res%s_branch2c' % name))
            b2 = bn(b2, ('bn%s_branch2c' % name), ('scale%s_branch2c' % name))
        x = b1 + b2
    return tf.nn.relu(x, name='relu')
|
def add_to_regularization_and_summary(var):
    """Histogram-summarize `var` and add its L2 loss to 'reg_loss'."""
    if var is None:
        return
    tf.summary.histogram(var.op.name, var)
    tf.add_to_collection('reg_loss', tf.nn.l2_loss(var))
|
def add_activation_summary(var):
    """Log an activation histogram and a sparsity scalar for `var`."""
    if var is None:
        return
    base = var.op.name
    tf.summary.histogram(base + '/activation', var)
    tf.summary.scalar(base + '/sparsity', tf.nn.zero_fraction(var))
|
def add_gradient_summary(grad, var):
    """Histogram-summarize `var`'s gradient; no-op when grad is None."""
    if grad is None:
        return
    tf.summary.histogram(var.op.name + '/gradient', grad)
|
def dict_to_list_of_overrides(d: dict):
    """Convert a nested dict into Hydra-style 'dotted.key=value' strings."""
    flat = flatten_dict(d, sep='.')
    return [f'{key}={val}' for key, val in flat.items()]
|
def flatten_dict(d: dict, sep: str='/', pre='') -> dict:
    """Recursively flatten a nested dict, joining key paths with `sep`.

    A non-dict `d` terminates the recursion as {pre: d}.
    """
    if not isinstance(d, dict):
        return {pre: d}
    flat = {}
    for key, val in d.items():
        for inner_key, inner_val in flatten_dict(val, sep, key).items():
            full_key = (pre + sep + inner_key) if pre else inner_key
            flat[full_key] = inner_val
    return flat
|
def add_to_outdirs_file(outdir: os.PathLike):
    """Append the absolute POSIX path of `outdir` to OUTDIRS_FILE.

    Fix: `.resolve` was missing its call parentheses, so the original
    raised AttributeError (a bound method has no `.as_posix`).
    """
    with open(OUTDIRS_FILE, 'a') as f:
        f.write(Path(outdir).resolve().as_posix() + '\n')
|
def get_jobdir(cfg: DictConfig, job_type: str) -> Path:
    """Create (if needed) and return `<cfg.outdir or cwd>/<job_type>`,
    recording the directory in the global outdirs file.

    Fix: dropped the dead `assert jobdir is not None` -- Path.joinpath
    can never return None.
    """
    outdir = Path(cfg.get('outdir', os.getcwd()))
    jobdir = outdir.joinpath(job_type)
    jobdir.mkdir(exist_ok=True, parents=True)
    add_to_outdirs_file(jobdir)
    return jobdir
|
def list_to_str(x: list) -> str:
    """Join list elements with '-'; floats use the '{:2.1f}' format.

    Element type is decided from the first element. Fix: an empty list now
    returns '' instead of raising IndexError.
    """
    if not x:
        return ''
    if isinstance(x[0], int):
        return '-'.join(str(int(i)) for i in x)
    if isinstance(x[0], float):
        return '-'.join(f'{i:2.1f}' for i in x)
    return '-'.join(str(i) for i in x)
|
@dataclass
class State():
    """A point in the sampler's phase space."""
    # x: configuration / position; v: conjugate momentum; beta: inverse
    # coupling. Concrete types are framework tensors -- TODO confirm.
    x: Any
    v: Any
    beta: Any
|
@dataclass
@rich.repr.auto
class BaseConfig(ABC):
    """Abstract base for config dataclasses: JSON / dict round-trips.

    Fixes vs. the original:
      * `from_file` opened the file in 'w' mode before reading, which
        truncated it -- the subsequent read always saw an empty file.
      * `to_file` dumped `self.to_json()` (a string) through json.dump,
        double-encoding the payload so `from_file` could never rebuild
        the instance from it.
    """

    @abstractmethod
    def to_str(self) -> str:
        """Short string identifier for this config (used in paths)."""
        pass

    def to_json(self) -> str:
        """Serialize the instance's field dict to a JSON string."""
        return json.dumps(self.__dict__)

    def get_config(self) -> dict:
        return asdict(self)

    def asdict(self) -> dict:
        # `asdict` resolves to dataclasses.asdict at module scope, not this
        # method, so there is no recursion here.
        return asdict(self)

    def to_dict(self) -> dict:
        return deepcopy(self.__dict__)

    def to_file(self, fpath: os.PathLike) -> None:
        """Write this config to `fpath` as a JSON object."""
        with open(fpath, 'w') as f:
            json.dump(self.to_dict(), f, indent=4)

    def from_file(self, fpath: os.PathLike) -> None:
        """Re-initialize this instance from a JSON file written by to_file."""
        with open(fpath, 'r') as f:
            config = json.load(f)
        self.__init__(**config)

    def __getitem__(self, key):
        # Allow dict-style access to attributes.
        return super().__getattribute__(key)
|
@dataclass
class Charges():
    """Topological charge observables."""
    # intQ: integer-valued charge; sinQ: sine-based (continuous) charge.
    intQ: Any
    sinQ: Any
|
@dataclass
class LatticeMetrics():
    """Per-step lattice observables."""
    plaqs: Any        # plaquette values
    charges: Charges  # topological charges (intQ / sinQ)
    p4x4: Any         # presumably 4x4 Wilson-loop values -- TODO confirm
    def asdict(self) -> dict:
        """Flatten to one dict, expanding the nested Charges fields."""
        return {'plaqs': self.plaqs, 'sinQ': self.charges.sinQ, 'intQ': self.charges.intQ, 'p4x4': self.p4x4}
|
@dataclass
class EnvConfig():
    """Runtime environment snapshot: distributed rank/size, host identity,
    machine detection by address prefix, and filtered environment vars.
    """
    def __post_init__(self):
        import socket
        dist_env = udist.query_environment()
        self.rank = dist_env['rank']
        self.local_rank = dist_env['local_rank']
        self.world_size = dist_env['world_size']
        try:
            self.hostname = socket.gethostname()
            self.addr = socket.gethostbyaddr(self.hostname)[0]
        except Exception:
            # Fix: the original fallback called gethostbyaddr() again
            # unguarded and could re-raise the very error being handled.
            self.hostname = 'localhost'
            try:
                self.addr = socket.gethostbyaddr(self.hostname)[0]
            except Exception:
                self.addr = 'localhost'
        # Map resolved-address prefixes to known machines.
        if self.addr.startswith('x3'):
            self.machine = 'Polaris'
            self.nodefile = os.environ.get('PBS_NODEFILE', None)
        elif self.addr.startswith('x1'):
            self.machine = 'Sunspot'
            self.nodefile = os.environ.get('PBS_NODEFILE', None)
        elif self.addr.startswith('thetagpu'):
            self.machine = 'ThetaGPU'
            self.nodefile = os.environ.get('COBALT_NODEFILE', None)
        else:
            self.machine = self.addr
            self.nodefile = None
        # Environment snapshot, minus filtered keys and module-system noise.
        self.env = {
            k: v for k, v in dict(os.environ).items()
            if (k not in ENV_FILTERS
                and not k.startswith('_ModuleTable')
                and not k.startswith('BASH_FUNC_'))
        }
|
@dataclass
class wandbSetup(BaseConfig):
    """Arguments forwarded to wandb.init()."""
    id: Optional[str] = None
    group: Optional[str] = None
    save_code: Optional[bool] = True
    sync_tensorboard: Optional[bool] = True
    tags: Optional[Sequence[str]] = None
    mode: Optional[str] = 'online'
    resume: Optional[str] = 'allow'
    entity: Optional[str] = 'l2hmc-qcd'
    project: Optional[str] = 'l2hmc-qcd'
    settings: Optional[dict] = field(default_factory=dict)
    def __post_init__(self):
        # Only reached when the caller passes settings=None explicitly;
        # the default_factory already yields {}.
        if (self.settings is None):
            self.settings = {'start_method': 'thread'}
    def to_str(self) -> str:
        # No meaningful path component for wandb setup.
        return ''
|
@dataclass
class wandbConfig(BaseConfig):
    """Wrapper holding the wandb.init() argument bundle."""
    setup: wandbSetup
    def to_str(self) -> str:
        return self.to_json()
|
@dataclass
class NetWeight(BaseConfig):
    """Object for selectively scaling different components of learned fns.

    Explicitly,
    - s: scales the v (x) scaling function in the v (x) updates
    - t: scales the translation function in the update
    - q: scales the force (v) transformation function in the v (x) updates
    """
    s: float = field(default=1.0)
    t: float = field(default=1.0)
    q: float = field(default=1.0)
    def to_dict(self):
        return {'s': self.s, 't': self.t, 'q': self.q}
    def to_str(self):
        # Fix: the q component was formatted from self.t in the original.
        return f's{self.s:2.1f}t{self.t:2.1f}q{self.q:2.1f}'
|
@dataclass
class NetWeights(BaseConfig):
    """Object for selectively scaling different components of x, v networks.

    Fix: the original used shared NetWeight *instances* as class-level
    defaults -- mutable dataclass defaults alias state across instances and
    are rejected outright on newer Python versions. default_factory gives
    each instance its own NetWeight(1.0, 1.0, 1.0).
    """
    x: NetWeight = field(default_factory=lambda: NetWeight(1.0, 1.0, 1.0))
    v: NetWeight = field(default_factory=lambda: NetWeight(1.0, 1.0, 1.0))
    def to_str(self):
        return f'nwx-{self.x.to_str()}-nwv-{self.v.to_str()}'
    def to_dict(self):
        return {'x': self.x.to_dict(), 'v': self.v.to_dict()}
    def __post_init__(self):
        # Accept plain dicts (e.g. from YAML) and coerce them to NetWeight.
        if not isinstance(self.x, NetWeight):
            self.x = NetWeight(**self.x)
        if not isinstance(self.v, NetWeight):
            self.v = NetWeight(**self.v)
|
@dataclass
class LearningRateConfig(BaseConfig):
    """Learning rate configuration object (ReduceLROnPlateau-style knobs)."""
    lr_init: float = 0.001
    mode: str = 'auto'
    monitor: str = 'loss'
    patience: int = 5
    cooldown: int = 0
    warmup: int = 1000
    verbose: bool = True
    min_lr: float = 1e-06
    factor: float = 0.98
    min_delta: float = 0.0001
    clip_norm: float = 2.0

    def to_str(self):
        # NOTE: {:3.2f} renders small rates coarsely (0.001 -> '0.00').
        return f'lr-{self.lr_init:3.2f}'
|
@dataclass
class Steps(BaseConfig):
    """Training-step bookkeeping: eras, epochs per era, logging cadence."""
    nera: int
    nepoch: int
    test: int
    log: int = 100
    print: int = 200
    extend_last_era: Optional[int] = None

    def __post_init__(self):
        if self.extend_last_era is None:
            self.extend_last_era = 1
        self.total = self.nera * self.nepoch
        # Default cadence when log/print passed as None: nepoch//20, >= 1.
        freq = int(self.nepoch // 20)
        if self.log is None:
            self.log = max(1, freq)
        if self.print is None:
            self.print = max(1, freq)
        assert isinstance(self.log, int)
        assert isinstance(self.print, int)

    def to_str(self) -> str:
        return f'nera-{self.nera}_nepoch-{self.nepoch}'

    def update(self, nera: Optional[int]=None, nepoch: Optional[int]=None, test: Optional[int]=None, log: Optional[int]=None, print: Optional[int]=None, extend_last_era: Optional[int]=None) -> Steps:
        """Return a new Steps with any non-None arguments replacing fields."""
        def pick(new, old):
            return old if new is None else new
        return Steps(
            nera=pick(nera, self.nera),
            nepoch=pick(nepoch, self.nepoch),
            test=pick(test, self.test),
            log=pick(log, self.log),
            print=pick(print, self.print),
            extend_last_era=pick(extend_last_era, self.extend_last_era),
        )
|
@dataclass
class ConvolutionConfig(BaseConfig):
    """Conv-stack spec: per-layer filter counts, kernel sizes, pool sizes."""
    filters: Optional[Sequence[int]] = None
    sizes: Optional[Sequence[int]] = None
    pool: Optional[Sequence[int]] = None

    def __post_init__(self):
        if self.filters is None:
            # No convolutional layers requested; nothing to fill or check.
            return
        nlayers = len(self.filters)
        if self.sizes is None:
            logger.warning('Using default filter size of 2')
            self.sizes = list(nlayers * [2])
        if self.pool is None:
            logger.warning('Using default pooling size of 2')
            self.pool = nlayers * [2]
        assert nlayers == len(self.sizes)
        assert nlayers == len(self.pool)
        assert self.pool is not None

    def to_str(self) -> str:
        if self.filters is None:
            return 'conv-None'
        if len(self.filters) > 0:
            parts = [list_to_str(list(self.filters))]
            if self.sizes is not None:
                parts.append(list_to_str(list(self.sizes)))
            if self.pool is not None:
                parts.append(list_to_str(list(self.pool)))
            return '-'.join(['conv', '_'.join(parts)])
        return ''
|
@dataclass
class NetworkConfig(BaseConfig):
    """Dense-network spec: layer widths, activation, dropout, batch norm."""
    units: Sequence[int]
    activation_fn: str
    dropout_prob: float
    use_batch_norm: bool = True

    def to_str(self):
        units_str = '-'.join(str(int(u)) for u in self.units)
        drop_str = f'dp-{self.dropout_prob:2.1f}'
        bn_str = f'bn-{self.use_batch_norm}'
        return '-'.join(['net', '_'.join([units_str, drop_str, bn_str])])
|
@dataclass
class DynamicsConfig(BaseConfig):
    """L2HMC dynamics configuration: lattice geometry, leapfrog steps,
    step sizes, and network-sharing options for the U(1) / SU(3) groups.
    """
    nchains: int          # number of parallel Markov chains (batch dim)
    group: str            # gauge group: 'U1' or 'SU3'
    latvolume: List[int]  # lattice volume: [nt, nx] (U1) or [nt, nx, ny, nz] (SU3)
    nleapfrog: int        # leapfrog steps per trajectory
    eps: float = 0.01
    eps_hmc: float = 0.01
    use_ncp: bool = True
    verbose: bool = True
    eps_fixed: bool = False
    use_split_xnets: bool = True
    use_separate_networks: bool = True
    merge_directions: bool = True
    def to_str(self) -> str:
        # Uses self.xshape, which __post_init__ derives from latvolume.
        latstr = '-'.join([str(i) for i in self.xshape[1:]])
        lfstr = f'nlf-{self.nleapfrog}'
        splitstr = f'xsplit-{self.use_split_xnets}'
        sepstr = f'sepnets-{self.use_separate_networks}'
        mrgstr = f'merge-{self.merge_directions}'
        return '/'.join([self.group, latstr, lfstr, splitstr, sepstr, mrgstr])
    def __post_init__(self):
        assert (self.group.upper() in ['U1', 'SU3'])
        # NOTE(review): eps_hmc has a non-None default (0.01), so this
        # branch only fires when None is passed explicitly.
        if (self.eps_hmc is None):
            self.eps_hmc = (1.0 / self.nleapfrog)
        if (self.group.upper() == 'U1'):
            # 2D U(1): x and v both shaped (nchains, dim, nt, nx).
            self.dim = 2
            (self.nt, self.nx) = self.latvolume
            self.xshape = (self.nchains, self.dim, *self.latvolume)
            self.vshape = (self.nchains, self.dim, *self.latvolume)
            assert (len(self.xshape) == 4)
            assert (len(self.latvolume) == 2)
            self.xdim = int(np.cumprod(self.xshape[1:])[(- 1)])
        elif (self.group.upper() == 'SU3'):
            # 4D SU(3): links are 3x3 matrices; momenta live in the
            # 8-dimensional algebra.
            self.dim = 4
            self.link_shape = (3, 3)
            self.vec_shape = 8
            (self.nt, self.nx, self.ny, self.nz) = self.latvolume
            self.xshape = (self.nchains, self.dim, *self.latvolume, *self.link_shape)
            self.vshape = (self.nchains, self.dim, *self.latvolume, self.vec_shape)
            assert (len(self.xshape) == 8)
            assert (len(self.vshape) == 7)
            assert (len(self.latvolume) == 4)
            self.xdim = int(np.cumprod(self.xshape[1:])[(- 1)])
        else:
            raise ValueError('Expected `group` to be one of `"U1", "SU3"`')
|
@dataclass
class LossConfig(BaseConfig):
    """Relative weights of the individual loss terms."""
    use_mixed_loss: bool = False
    charge_weight: float = 0.01
    rmse_weight: float = 0.0
    plaq_weight: float = 0.0
    aux_weight: float = 0.0

    def to_str(self) -> str:
        parts = [
            f'qw-{self.charge_weight:2.1f}',
            f'pw-{self.plaq_weight:2.1f}',
            f'rw-{self.rmse_weight:2.1f}',
            f'aw-{self.aux_weight:2.1f}',
            f'mixed-{self.use_mixed_loss}',
        ]
        return '_'.join(parts)
|
@dataclass
class InputSpec(BaseConfig):
    """Input shape specification for the x and v networks."""
    xshape: Sequence[int]     # full batched configuration shape
    xnet: Optional[Dict[(str, (int | Sequence[int]))]] = None
    vnet: Optional[Dict[(str, (int | Sequence[int]))]] = None
    def to_str(self):
        return '-'.join([str(i) for i in self.xshape])
    def __post_init__(self):
        if (len(self.xshape) == 2):
            # Already flattened: (batch, xdim); v matches x.
            self.xdim = self.xshape[(- 1)]
            self.vshape = self.xshape
            self.vdim = self.xshape[(- 1)]
        elif (len(self.xshape) > 2):
            # Flattened size is the product of all non-batch dims.
            self.xdim: int = np.cumprod(self.xshape[1:])[(- 1)]
            # Presumably SU(N) links: the trailing (N, N) matrix axes are
            # replaced by the N^2 - 1 dimensional algebra -- TODO confirm.
            lat_shape = self.xshape[:(- 2)]
            vd = ((self.xshape[(- 1)] ** 2) - 1)
            self.vshape: Sequence[int] = (*lat_shape, vd)
            self.vdim: int = np.cumprod(self.vshape[1:])[(- 1)]
        else:
            raise ValueError(f'Invalid `xshape`: {self.xshape}')
        # Default per-network input specs mirror the configuration shape.
        if (self.xnet is None):
            self.xnet = {'x': self.xshape, 'v': self.xshape}
        if (self.vnet is None):
            self.vnet = {'x': self.xshape, 'v': self.xshape}
|
@dataclass
class FlopsProfiler():
    """Flops-profiler section of the (DeepSpeed-style) config."""
    enabled: bool = False
    profile_step: int = 1
    module_depth: int = (- 1)   # -1: no depth limit
    top_modules: int = 1
    detailed: bool = True
    output_file: Optional[((os.PathLike | str) | Path)] = None
    def __post_init__(self):
        # No derived state required.
        pass
|
@dataclass
class OptimizerConfig():
    """Optimizer section: name plus free-form keyword parameters."""
    type: str
    params: Optional[dict] = field(default_factory=dict)
|
@dataclass
class fp16Config():
    """fp16 mixed-precision training section."""
    enabled: bool
    auto_cast: bool = True
    fp16_master_weights_and_grads: bool = False
    min_loss_scale: float = 0.0
|
@dataclass
class CommsLogger():
    """Communication-logging section."""
    enabled: bool
    verbose: bool = True
    prof_all: bool = True
    debug: bool = False
|
@dataclass
class AutoTuning():
    """Autotuning section: toggle plus argument-name mappings."""
    enabled: bool
    arg_mappings: Optional[dict] = field(default_factory=dict)
|
@dataclass
class ZeroOptimization():
    """ZeRO optimization section; `stage` selects the ZeRO stage."""
    stage: int
|
@dataclass
class ExperimentConfig(BaseConfig):
    """Top-level experiment configuration aggregating all sub-configs.

    Fixes vs. the original:
      * the `name` field was declared twice.
      * `np.random.randint(0)` raises ValueError (empty range); a usable
        31-bit seed is drawn instead.
      * EnvConfig was constructed twice (each construction probes sockets
        and the environment); `env` now aliases `env_config`.
    """
    wandb: Any
    steps: Steps
    framework: str
    loss: LossConfig
    network: NetworkConfig
    conv: ConvolutionConfig
    net_weights: NetWeights
    dynamics: DynamicsConfig
    learning_rate: LearningRateConfig
    annealing_schedule: AnnealingSchedule
    gradient_accumulation_steps: int = 1
    restore: bool = True
    save: bool = True
    c1: float = 0.0
    port: str = '2345'
    compile: bool = True
    profile: bool = False
    init_aim: bool = True
    init_wandb: bool = True
    use_wandb: bool = True
    use_tb: bool = False
    debug_mode: bool = False
    default_mode: bool = True
    print_config: bool = True
    precision: str = 'float32'
    ignore_warnings: bool = True
    backend: str = 'hvd'
    seed: Optional[int] = None
    ds_config_path: Optional[Any] = None
    name: Optional[str] = None
    width: Optional[int] = None
    nchains: Optional[int] = None
    compression: Optional[str] = None

    def __post_init__(self):
        self.env_config = EnvConfig()
        if self.seed is None:
            import numpy as np
            # Draw a 31-bit seed; randint(0) in the original was an error.
            self.seed = np.random.randint(2 ** 31 - 1)
            logger.warning(f'No seed specified, using random seed: {self.seed}')
        # Reuse the environment info already gathered above.
        self.env = self.env_config
        self.ds_config = {}
        self.xdim = self.dynamics.xdim
        self.xshape = self.dynamics.xshape
        self.micro_batch_size = self.dynamics.nchains
        self.global_batch_size = (
            self.env.world_size * self.micro_batch_size * self.gradient_accumulation_steps
        )
        if self.ds_config_path is None:
            fpath = Path(CONF_DIR).joinpath('ds_config.yaml')
            self.ds_config_path = fpath.resolve().as_posix()
        # Normalize precision synonyms to canonical names.
        if self.precision in FP16_SYNONYMS:
            self.precision = 'fp16'
        elif self.precision in BF16_SYNONYMS:
            self.precision = 'bf16'
        elif self.precision in FP32_SYNONYMS:
            self.precision = 'float32'
        elif self.precision in FP64_SYNONYMS:
            self.precision = 'float64'
        w = int(os.environ.get('COLUMNS', 200))
        self.width = w if self.width is None else self.width
        if self.framework in SYNONYMS['tensorflow']:
            self.backend = 'hvd'
        elif self.framework in SYNONYMS['pytorch']:
            if self.backend is None:
                logger.warning('Backend not specified, using DDP')
                self.backend = 'DDP'
            assert self.backend.lower() in ['hvd', 'horovod', 'ddp', 'ds', 'deepspeed']
        else:
            raise ValueError(f'Unexpected value for framework: {self.framework}')
        if self.debug_mode:
            # Compilation gets in the way of debugging.
            self.compile = False
        self.annealing_schedule.setup(nera=self.steps.nera, nepoch=self.steps.nepoch)

    def load_ds_config(self, fpath: Optional[os.PathLike] = None) -> dict:
        """Load the DeepSpeed config from a .json or .yaml file."""
        fname = self.ds_config_path if fpath is None else fpath
        assert fname is not None
        ds_config_path = Path(fname)
        logger.info(f'Loading DeepSpeed Config from: {ds_config_path.as_posix()}')
        if ds_config_path.suffix == '.json':
            with ds_config_path.open('r') as f:
                return json.load(f)
        if ds_config_path.suffix == '.yaml':
            import yaml
            with ds_config_path.open('r') as stream:
                return dict(yaml.safe_load(stream))
        raise TypeError('Unexpected FileType')

    def set_ds_config(self, ds_config: dict) -> None:
        self.ds_config = ds_config

    def to_str(self) -> str:
        dynstr = self.dynamics.to_str()
        constr = self.conv.to_str()
        netstr = self.network.to_str()
        return '/'.join([dynstr, constr, netstr, self.framework])

    def get_checkpoint_dir(self) -> Path:
        return Path(CHECKPOINTS_DIR).joinpath(self.to_str())

    def rank(self):
        """Return this process's rank under the active framework/backend."""
        if self.framework in SYNONYMS['pytorch']:
            backend = self.backend.lower()
            if backend in SYNONYMS['horovod']:
                import horovod.torch as hvd
                if not hvd.is_initialized():
                    hvd.init()
                return hvd.rank()
            elif backend in SYNONYMS['DDP']:
                return int(os.environ.get('RANK', 0))
            elif backend in SYNONYMS['deepspeed']:
                import torch.distributed as dist
                return dist.get_rank()
        elif self.framework in SYNONYMS['tensorflow']:
            import horovod.tensorflow as hvd
            if not hvd.is_initialized():
                hvd.init()
            return hvd.rank()
|
@dataclass
class AnnealingSchedule(BaseConfig):
    """Linear annealing schedule for beta over the training eras."""
    beta_init: float
    beta_final: Optional[float] = 1.0
    # dynamic: flag not read anywhere in this file -- TODO confirm use.
    dynamic: bool = False
    def to_str(self) -> str:
        return f'bi-{self.beta_init}_bf-{self.beta_final}'
    def __post_init__(self):
        # Clamp: beta_final must be >= beta_init; otherwise fall back to a
        # constant schedule at beta_init.
        if ((self.beta_final is None) or (self.beta_final < self.beta_init)):
            logger.warning(f'''AnnealingSchedule.beta_final must be >= {self.beta_init}, but received: {self.beta_final}.
Setting self.beta_final to {self.beta_init}''')
            self.beta_final = float(self.beta_init)
        assert (isinstance(self.beta_final, float) and (self.beta_final >= self.beta_init))
    def update(self, beta_init: Optional[float]=None, beta_final: Optional[float]=None):
        """Overwrite endpoints of the schedule (logged loudly)."""
        logger.warning('Updating annealing schedule!')
        if (beta_init is not None):
            logger.warning(f'annealing_schedule.beta_init = {beta_init:.3f}')
            self.beta_init = beta_init
        if (beta_final is not None):
            logger.warning(f'annealing_schedule.beta_final = {beta_final:.3f}')
            self.beta_final = beta_final
    def setup(self, nera: Optional[int]=None, nepoch: Optional[int]=None, steps: Optional[Steps]=None, beta_init: Optional[float]=None, beta_final: Optional[float]=None) -> dict:
        """Materialize the per-era beta values.

        Returns a dict mapping era index (as str) to its beta.
        """
        if (nera is None):
            assert (steps is not None)
            nera = steps.nera
        if (nepoch is None):
            assert (steps is not None)
            nepoch = steps.nepoch
        if (beta_init is None):
            beta_init = self.beta_init
        if (beta_final is None):
            beta_final = (self.beta_final if (self.beta_final is not None) else self.beta_init)
        self.betas = np.linspace(beta_init, beta_final, nera)
        # NOTE(review): without `steps`, total falls back to 1, making
        # _dbeta the full beta range per step -- confirm intent.
        total = (steps.total if (steps is not None) else 1)
        self._dbeta = ((beta_final - beta_init) / total)
        self.beta_dict = {str(era): self.betas[era] for era in range(nera)}
        return self.beta_dict
|
@dataclass
class Annealear():
    """Dynamically adjust annealing schedule during training.

    Fixes vs. the original:
      * `start_epoch` indexed `self.betas[-1]` before any beta had been
        recorded, raising IndexError on the first epoch.
      * the previous-era lookup tested an *int* against the string keys of
        `self.losses`, so it never matched.
      * `avg_diff` used a float as a slice index (TypeError).
    """
    schedule: AnnealingSchedule
    patience: int
    min_delta: Optional[float] = None

    def __post_init__(self):
        # Running state for the adaptive schedule.
        self.wait = 0
        self.best = np.Inf
        self._current_era = 0
        self._current_beta = self.schedule.beta_init
        self._epoch = 0
        self._count = 0
        self.betas = []    # beta recorded at the start of each era
        self.loss = []     # individual losses from update()
        self.losses = {}   # era (str) -> {'beta': ..., 'loss': [...]}
        self._reset()

    def _reset(self):
        self.wait = 0

    def update(self, loss: float):
        """Record one training loss."""
        self._epoch += 1
        self.loss.append(loss)

    @staticmethod
    def avg_diff(y: list[float], x: Optional[list[float]]=None, *, drop: Optional[(int | float)]=None) -> float:
        """Return the mean finite difference of y (over x when given),
        optionally dropping a leading portion of the data."""
        if x is not None:
            assert len(x) == len(y)
        if drop is not None:
            if isinstance(drop, int):
                if drop <= 1:
                    raise ValueError('Expected `drop` to be an int > 1')
                y = y[drop:]
                if x is not None:
                    x = x[drop:]
            elif isinstance(drop, float):
                if drop <= 1.0:
                    raise ValueError('Expected `drop` to be a float > 1.')
                # Fix: slice indices must be ints (the original passed a
                # float and raised TypeError).
                # NOTE(review): with drop > 1.0, drop*len(y) >= len(y)
                # discards everything -- the "fraction" semantics look
                # suspect; confirm with the author.
                frac = int(drop * len(y))
                y = y[frac:]
                if x is not None:
                    x = x[frac:]
            else:
                raise ValueError('Expected drop to be one of `int` or `float`.')
        dyavg = np.subtract(y[1:], y[:-1]).mean()
        if x is not None:
            dxavg = np.subtract(x[1:], x[:-1]).mean()
            return dyavg / dxavg
        return dyavg

    def start_epoch(self, era: int, beta: float):
        """Open a new era: record its beta and the best loss of the previous era."""
        self.losses[f'{era}'] = {'beta': beta, 'loss': []}
        # Fix: guard the first call, when no beta has been recorded yet.
        self._prev_beta = self.betas[-1] if self.betas else beta
        self._current_era = era
        self._current_beta = beta
        self.betas.append(beta)
        self._prev_best = np.Inf
        prev = str(era - 1)
        # Fix: keys are era ids as *strings*; the original tested the int.
        if prev in self.losses:
            self._prev_best = np.min(self.losses[prev]['loss'])

    def end_epoch(self, losses: list[float], era: Optional[int]=None, beta: Optional[float]=None, drop: Optional[(int | float)]=None) -> float:
        """Close an era and return the beta to use next.

        Advances beta when the loss improved (new best or negative slope),
        retreats to the previous beta after `patience` stalls at the
        current value, and otherwise holds.
        """
        current_era = self._current_era if era is None else era
        current_beta = self._current_beta if beta is None else beta
        prev_beta = self._prev_beta
        new_beta = current_beta + self.schedule._dbeta
        self.losses[f'{current_era}'] = {'beta': current_beta, 'loss': losses}
        new_best = np.min(losses)
        avg_slope = self.avg_diff(losses, drop=drop)
        if (new_best < self._prev_best) or (avg_slope < 0):
            return new_beta
        current_beta_count = Counter(self.betas).get(current_beta)
        if (current_beta_count is not None) and isinstance(current_beta_count, int) and (current_beta_count > self.patience):
            return prev_beta
        return current_beta
|
def get_config(overrides: Optional[list[str]]=None):
    """Compose and return the Hydra config from CONF_DIR with `overrides`."""
    from hydra import initialize_config_dir, compose
    from hydra.core.global_hydra import GlobalHydra
    # Hydra keeps global state; clear any previous initialization first.
    GlobalHydra.instance().clear()
    if overrides is None:
        overrides = []
    with initialize_config_dir(CONF_DIR.absolute().as_posix(), version_base=None):
        cfg = compose('config', overrides=overrides)
    return cfg
|
def get_experiment(overrides: Optional[list[str]]=None, build_networks: bool=True, keep: Optional[(str | list[str])]=None, skip: Optional[(str | list[str])]=None):
    """Build a framework-specific Experiment from the composed Hydra config."""
    cfg = get_config(overrides)
    framework = cfg.framework
    if framework == 'pytorch':
        from l2hmc.experiment.pytorch.experiment import Experiment
    elif framework == 'tensorflow':
        from l2hmc.experiment.tensorflow.experiment import Experiment
    else:
        raise ValueError(f'Unexpected value for `cfg.framework: {cfg.framework}')
    return Experiment(cfg, keep=keep, skip=skip, build_networks=build_networks)
|
@dataclass
class DiffusionConfig():
    """Configuration for diffusion-model-assisted sampling.

    Attributes:
        log_likelihood_fn: Log-likelihood function to be sampled; called with
            a parameter tensor `x` and operating in `dim` dimensions.
        dim: Number of dimensions of the likelihood function.
        low_bound: Tensor of lower bounds on each parameter.
        high_bound: Tensor of upper bounds on each parameter.
        initial_samples: Starting samples for the algorithm. Widely-separated
            modes each need several samples here so the diffusion model can
            jump between them.
        train_iters: Number of times to (re)train the diffusion model; more
            retrains improve performance at the cost of runtime.
        samples_per_retrain: Diffusion samples generated before each retrain
            (total samples = train_iters * samples_per_retrain).
        outdir: Where to save results.
        nsteps: Noising steps in the forward / reverse diffusion process.
            Increase if the model fails to reproduce the target closely;
            decrease if training is too slow.
        sigma: Width of the pure Metropolis-Hastings Gaussian proposal.
            Increase if the sampler is stuck in local minima or exploring
            too little of parameter space.
        diffusion_prob: Probability of drawing from the diffusion model;
            higher values are generally preferred for multi-modal targets.
        bins: Bins used in the per-parameter 1D histograms for the Q proposal
            weights; increase for finer resolution (slower retrains).
    """
    log_likelihood_fn: Callable[[torch.Tensor], torch.Tensor]
    dim: int
    low_bound: torch.Tensor
    high_bound: torch.Tensor
    initial_samples: torch.Tensor
    train_iters: int
    samples_per_retrain: int
    outdir: os.PathLike
    nsteps: int = 20
    sigma: float = 0.3
    diffusion_prob: float = 0.5
    bins: int = 20
|
class DummyTqdmFile(object):
    """File-like object that routes writes through `tqdm.write` so output
    does not clobber active progress bars.
    https://github.com/tqdm/tqdm/issues/313
    """
    file = None

    def __init__(self, file):
        self.file = file

    def write(self, x):
        # Let tqdm interleave the text with any running progress bars.
        tqdm.tqdm.write(x, file=self.file, end='\n')

    def flush(self):
        # Flush the underlying file when it supports it; otherwise no-op.
        flusher = getattr(self.file, 'flush', lambda: None)
        return flusher()
|
def get_rich_logger(name: Optional[str]=None, level: str='INFO') -> logging.Logger:
    """Return logger `name` with a single RichHandler on the shared console."""
    from l2hmc.utils.rich import get_console
    logger = logging.getLogger(name)
    console = get_console(markup=True, redirect=(WORLD_SIZE > 1))
    rich_handler = RichHandler(
        level,
        rich_tracebacks=False,
        console=console,
        show_path=False,
        enable_link_path=False,
    )
    # Replace any pre-existing handlers with the single rich handler.
    logger.handlers = [rich_handler]
    logger.setLevel(level)
    return logger
|
def get_file_logger(name: Optional[str]=None, level: str='INFO', rank_zero_only: bool=True, fname: Optional[str]=None) -> logging.Logger:
    """Attach a FileHandler to logger `name`.

    With `rank_zero_only`, all ranks share '<fname>.log' but non-zero ranks
    are silenced to CRITICAL; otherwise each rank writes '<fname>-<RANK>.log'
    at `level`.
    """
    import logging
    base = 'l2hmc' if fname is None else fname
    logger = logging.getLogger(name)
    if rank_zero_only:
        handler = logging.FileHandler(f'{base}.log')
        effective = level if RANK == 0 else 'CRITICAL'
        logger.setLevel(effective)
        handler.setLevel(effective)
    else:
        handler = logging.FileHandler(f'{base}-{RANK}.log')
        logger.setLevel(level)
        handler.setLevel(level)
    handler.setFormatter(
        logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] - %(message)s')
    )
    logger.addHandler(handler)
    return logger
|
def get_logger(name: Optional[str]=None, level: str='INFO', rank_zero_only: bool=True, **kwargs) -> logging.Logger:
    """Build the main logger: rank 0 gets a RichHandler; with
    `rank_zero_only`, other ranks are silenced to CRITICAL."""
    logger = logging.getLogger(name)
    from l2hmc.utils.rich import get_console, is_interactive
    if rank_zero_only:
        logger.setLevel(level if RANK == 0 else 'CRITICAL')
    if RANK == 0:
        console = get_console(markup=True, redirect=(WORLD_SIZE > 1), **kwargs)
        if console.is_jupyter:
            console.is_jupyter = False
        # Markup/links only for single-process, non-interactive runs.
        use_markup = (WORLD_SIZE == 1) and (not is_interactive())
        logger.addHandler(
            RichHandler(
                omit_repeated_times=False,
                level=level,
                console=console,
                show_time=True,
                show_level=True,
                show_path=True,
                markup=use_markup,
                enable_link_path=use_markup,
            )
        )
        logger.setLevel(level)
    # Collapse duplicate handlers down to one.
    if len(logger.handlers) > 1 and all(h == logger.handlers[0] for h in logger.handlers):
        logger.handlers = [logger.handlers[0]]
    return logger
|
def get_experiment(cfg: DictConfig, keep: Optional[(str | list[str])]=None, skip: Optional[(str | list[str])]=None):
    """Set up the requested framework and return its Experiment built from `cfg`.

    NOTE: this shadows the earlier module-level `get_experiment(overrides=...)`
    definition.

    Raises:
        ValueError: If `cfg.framework` names neither tensorflow nor pytorch.
    """
    framework = cfg.get('framework', None)
    os.environ['RUNDIR'] = os.getcwd()
    if framework in ('tf', 'tensorflow'):
        cfg.framework = 'tensorflow'
        from ezpz import setup_tensorflow
        _ = setup_tensorflow(cfg.precision)
        from l2hmc.experiment.tensorflow.experiment import Experiment
        return Experiment(cfg, keep=keep, skip=skip)
    if framework in ('pt', 'pytorch', 'torch'):
        import torch
        cfg.framework = 'pytorch'
        from ezpz import setup_torch
        _ = setup_torch(
            seed=cfg.seed,
            backend=cfg.get('backend', 'DDP'),
            port=cfg.get('port', '2345'),
        )
        precision = cfg.get('precision', None)
        if precision is not None and precision in {'float64', 'fp64', 'f64', '64', 'double'}:
            LOG.warning(f'setting default dtype: {precision}')
            torch.set_default_dtype(torch.float64)
        from l2hmc.experiment.pytorch.experiment import Experiment
        return Experiment(cfg, keep=keep, skip=skip)
    raise ValueError('Framework must be specified, one of: [pytorch, tensorflow]')
|
def run(cfg: DictConfig, overrides: Optional[list[str]]=None) -> str:
    """Run a full train / eval / HMC cycle for `cfg`.

    Trains when `steps.nera > 0 and steps.nepoch > 0`, then (orchestrator
    only) evaluates the trained model, runs generic HMC for comparison,
    measures the improvement, and uploads plots/logs as a W&B artifact.

    Returns:
        The experiment output directory as a posix string.
    """
    from l2hmc.utils.plot_helpers import set_plot_style
    set_plot_style()
    import matplotlib.pyplot as plt
    import opinionated
    plt.style.use(opinionated.STYLES['opinionated_min'])
    if (overrides is not None):
        from l2hmc.configs import get_config
        cfg.update(get_config(overrides))
    ex = get_experiment(cfg)
    if ex.trainer._is_orchestrator:
        # Best-effort pretty-print of the resolved config; failures only warn.
        try:
            from omegaconf import OmegaConf
            from rich import print_json
            conf = OmegaConf.structured(ex.config)
            cdict = OmegaConf.to_container(conf)
            print_json(json.dumps(cdict))
        except Exception as e:
            LOG.exception(e)
            LOG.warning('Continuing!')
    should_train: bool = ((ex.config.steps.nera > 0) and (ex.config.steps.nepoch > 0))
    # Use a quarter of the configured chains (at least 2) for evaluation.
    nchains_eval = max(2, int((ex.config.dynamics.xshape[0] // 4)))
    if should_train:
        tstart = time.time()
        _ = ex.train()
        LOG.info(f'Training took: {(time.time() - tstart):.5f}s')
        # Evaluate the freshly-trained model (orchestrator only).
        if (ex.trainer._is_orchestrator and (ex.config.steps.test > 0)):
            LOG.info('Evaluating trained model')
            estart = time.time()
            _ = ex.evaluate(job_type='eval', nchains=nchains_eval)
            LOG.info(f'Evaluation took: {(time.time() - estart):.5f}s')
    if (ex.trainer._is_orchestrator and (ex.config.steps.test > 0)):
        # Baseline: generic HMC with the same number of chains.
        LOG.info('Running generic HMC for comparison')
        hstart = time.time()
        _ = ex.evaluate(job_type='hmc', nchains=nchains_eval)
        LOG.info(f'HMC took: {(time.time() - hstart):.5f}s')
        from l2hmc.utils.plot_helpers import measure_improvement
        improvement = measure_improvement(experiment=ex, title=f'{ex.config.framework}')
        if (ex.config.init_wandb and (wandb.run is not None)):
            wandb.run.log({'model_improvement': improvement})
        LOG.critical(f'Model improvement: {improvement:.8f}')
        if (wandb.run is not None):
            LOG.critical(f'π {wandb.run.name}')
            LOG.critical(f'π {wandb.run.url}')
            LOG.critical(f'π/: {wandb.run.dir}')
            # Collect plots / logs from the run directory into a W&B artifact.
            artifact = wandb.Artifact('logdir', type='directory')
            rundir = Path(os.getcwd())
            dirs_ = ('pngs', 'network_diagrams', '.hydra')
            files_ = ('__main__.log', 'main_debug.log', 'main.log', 'model_improvement.svg', 'model_improvement.txt', 'plots.txt')
            for file in files_:
                if (fpath := rundir.joinpath(file)).is_file():
                    LOG.info(f'Adding {file} to W&B artifact...')
                    artifact.add_file(fpath.as_posix())
            for dir_ in dirs_:
                if (dpath := rundir.joinpath(dir_)).is_dir():
                    LOG.info(f'Adding {dir_} to W&B artifact...')
                    artifact.add_dir(dpath.as_posix())
            LOG.info(f'Logging {artifact} to W&B')
            wandb.run.log_artifact(artifact)
    if ex.trainer._is_orchestrator:
        # Visualization is best-effort; never fail the run over it.
        try:
            ex.visualize_model()
        except Exception:
            LOG.error('Unable to make visuals for model, continuing!')
    LOG.critical(f'experiment dir: {Path(ex._outdir).as_posix()}')
    return Path(ex._outdir).as_posix()
|
def build_experiment(overrides: Optional[(str | list[str])]=None):
    """Compose a config from `overrides` (single string or list) and build
    the corresponding Experiment."""
    import warnings
    warnings.filterwarnings('ignore')
    from l2hmc.configs import get_config
    # Accept a single override string for convenience.
    override_list = [overrides] if isinstance(overrides, str) else overrides
    cfg = get_config(override_list)
    return get_experiment(cfg=cfg)
|
@hydra.main(version_base=None, config_path='./conf', config_name='config')
def main(cfg: DictConfig):
    """Hydra entry point: run the experiment, then tear down DDP if needed."""
    output = run(cfg)
    framework = str(cfg.get('framework', None)).lower()
    backend = str(cfg.get('backend', None)).lower()
    # Only torch+DDP needs an explicit process-group teardown.
    if framework in {'pt', 'torch', 'pytorch'} and backend == 'ddp':
        from l2hmc.utils.dist import cleanup
        cleanup()
    return output
|
def grab_tensor(x: Any) -> ((np.ndarray | ScalarLike) | None):
    """Convert `x` to a numpy array, passing scalars and None through.

    Handles torch tensors (detached to CPU), numpy arrays, lists of tensors
    or arrays (stacked), tf tensors when tensorflow is importable, and any
    object exposing a callable `.numpy()`.

    Raises:
        ValueError: If `x` cannot be converted.
    """
    if (x is None):
        return None
    if isinstance(x, (int, float, bool, np.floating)):
        return x
    if isinstance(x, list):
        if isinstance(x[0], torch.Tensor):
            return grab_tensor(torch.stack(x))
        if isinstance(x[0], np.ndarray):
            return np.stack(x)
        # Only probe for tf.Tensor when tensorflow is actually available;
        # the original hard import crashed for any other list type.
        try:
            import tensorflow as tf
        except ImportError:
            tf = None
        if (tf is not None) and isinstance(x[0], tf.Tensor):
            return grab_tensor(tf.stack(x))
        # Generalized: plain numeric lists previously fell through to the
        # bare ValueError; convert them directly.
        return np.asarray(x)
    if isinstance(x, np.ndarray):
        return x
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    if callable(getattr(x, 'numpy', None)):
        return x.numpy()
    raise ValueError(f'Unable to convert object of type {type(x)} to numpy')
|
def dict_to_str(d: dict, grab: Optional[bool]=None) -> str:
    """Render `d` as newline-joined 'key: value' lines; when `grab` is
    truthy, include each value's shape/dtype and its numpy view."""
    if not grab:
        return '\n'.join(f'{key}: {val}' for key, val in d.items())
    lines = []
    for key, val in d.items():
        shape = getattr(val, 'shape', None)
        dtype = getattr(val, 'dtype', None)
        lines.append(f'{key}: {shape} {dtype}\n{grab_tensor(val)}')
    return '\n'.join(lines)
|
def print_dict(d: dict, grab: Optional[bool]=None, ret: Optional[bool]=None) -> (str | None):
    """Log `d` via `dict_to_str`; return the rendered string when `ret` is truthy."""
    rendered = dict_to_str(d, grab=grab)
    log.info(rendered)
    if ret:
        return rendered
    return None
|
def clear_cuda_cache():
    """Run a GC pass, release cached CUDA memory, and clear the autocast cache.

    The CUDA call is guarded so CPU-only builds (or older torch versions that
    raise without an initialized CUDA context) stay safe.
    """
    import gc
    gc.collect()
    if torch.cuda.is_available():
        with torch.no_grad():
            torch.cuda.empty_cache()
    torch.clear_autocast_cache()
|
def get_timestamp(fstr=None):
    """Get formatted timestamp.

    Uses '%Y-%m-%d-%H%M%S' when `fstr` is None, otherwise formats the
    current time with `fstr`.
    """
    now = datetime.datetime.now()
    if fstr is None:
        return now.strftime('%Y-%m-%d-%H%M%S')
    return now.strftime(fstr)
|
def seed_everything(seed: int):
    """Seed every RNG we rely on (python, numpy, torch) plus PYTHONHASHSEED."""
    import random
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
|
def check_diff(x: Any, y: Any, name: Optional[str]=None) -> None:
    """Recursively log elementwise-difference statistics between `x` and `y`.

    States are compared field-by-field, dicts key-by-key (warning on key
    mismatches), sequences element-by-element; leaves are converted via
    `grab_tensor` and their diff stats (min/max/sum, plus mean/std/allclose
    for multi-dimensional diffs) are logged.

    Note: logs only; always returns None (the original annotation claimed
    np.ndarray).
    """
    if isinstance(x, State):
        xd = {'x': x.x, 'v': x.v, 'beta': x.beta}
        yd = {'x': y.x, 'v': y.v, 'beta': y.beta}
        check_diff(xd, yd, name='State')
    elif (isinstance(x, dict) and isinstance(y, dict)):
        for ((kx, vx), (ky, vy)) in zip(x.items(), y.items()):
            if (kx == ky):
                check_diff(vx, vy, name=kx)
            else:
                # Keys disagree positionally; try to match kx inside y.
                log.warning('Mismatch encountered!')
                log.warning(f'kx: {kx}')
                log.warning(f'ky: {ky}')
                vy_ = y.get(kx, None)
                if (vy_ is not None):
                    check_diff(vx, vy_, name=kx)
                else:
                    log.warning(f'{kx} not in y, skipping!')
                    continue
    elif (isinstance(x, (list, tuple)) and isinstance(y, (list, tuple))):
        assert (len(x) == len(y))
        for idx in range(len(x)):
            check_diff(x[idx], y[idx], name=f'{name}, {idx}')
    else:
        x = grab_tensor(x)
        y = grab_tensor(y)
        diff = np.array((x - y))
        dstr = []
        if (name is not None):
            # BUGFIX: drop the stray trailing quote from the original output.
            dstr.append(f"'{name}'")
        dstr.extend((f' min(diff): {np.min(diff)}', f' max(diff): {np.max(diff)}', f' sum(diff): {np.sum(diff)}', f' sum(diff ** 2): {np.sum((diff ** 2))}'))
        if (len(diff.shape) > 1):
            dstr.extend((f' mean(diff): {np.mean(diff)}', f' std(diff): {np.std(diff)}', f' np.allclose: {np.allclose(x, y)}'))
        log.info('\n'.join(dstr))
|
def update_dict(dnew: dict, dold: Optional[dict]=None) -> tuple[(list[str], dict)]:
    """Append each entry of `dnew` onto the running history `dold`.

    Values are normalized to numpy (tensors via `grab_tensor`, lists stacked,
    everything else via `np.array`).

    Returns:
        (mstr, dold): formatted 'key=mean' strings and the updated history
        (each key maps to a list of values).
    """
    import torch
    import tensorflow as tf
    dold = ({} if (dold is None) else dold)
    mstr = []
    for (key, val) in dnew.items():
        if isinstance(val, (torch.Tensor, tf.Tensor)):
            val = grab_tensor(val)
        if isinstance(val, list):
            if isinstance(val[0], torch.Tensor):
                val = grab_tensor(torch.stack(val))
            elif isinstance(val[0], tf.Tensor):
                val = grab_tensor(tf.stack(val))
            else:
                try:
                    val = np.stack(val)
                except Exception as exc:
                    log.exception(exc)
        else:
            val = np.array(val)
        try:
            mstr.append(f'{key}={val.mean():^5.4f}')
        except AttributeError:
            # Scalars without `.mean()` are formatted directly.
            mstr.append(f'{key}={val:^5.4f}')
        try:
            dold[key].append(val)
        except KeyError:
            # BUGFIX: a missing key raises KeyError, not NameError — the old
            # handler never fired, so the first append for a new key crashed.
            dold[key] = [val]
    return (mstr, dold)
|
def setup_annealing_schedule(cfg: DictConfig) -> AnnealingSchedule:
    """Build and set up an AnnealingSchedule from `cfg.steps` and the
    (optional) `beta_init` / `beta_final` entries.

    Defaults: beta_init = 1.0, beta_final = beta_init, each with a warning.
    """
    steps = Steps(**cfg.steps)
    beta_init = cfg.get('beta_init', None)
    beta_final = cfg.get('beta_final', None)
    if (beta_init is None):
        beta_init = 1.0
        # `Logger.warn` is deprecated; also restore the missing space in the message.
        log.warning(f'beta_init not specified! using default: beta_init = {beta_init}')
    if (beta_final is None):
        beta_final = beta_init
        log.warning(f'beta_final not specified! using beta_final = beta_init = {beta_init}')
    sched = AnnealingSchedule(beta_init, beta_final)
    sched.setup(nera=steps.nera, nepoch=steps.nepoch, beta_init=beta_init, beta_final=beta_final)
    return sched
|
def save_dataset(dataset: xr.Dataset, outdir: os.PathLike, use_hdf5: Optional[bool]=True, job_type: Optional[str]=None, **kwargs) -> Path:
    """Persist `dataset` under `outdir` — HDF5 by default, netCDF4 otherwise —
    falling back to netCDF4 if the HDF5 writer rejects the data.

    Returns the HDF5 target path (even after a fallback) or the netCDF path.
    """
    if use_hdf5:
        fname = 'dataset.h5' if job_type is None else f'{job_type}_data.h5'
        outfile = Path(outdir).joinpath(fname)
        try:
            dataset_to_h5pyfile(outfile, dataset=dataset, **kwargs)
        except TypeError:
            log.warning('Unable to save as `.h5` file, falling back to `netCDF4`')
            save_dataset(dataset, outdir=outdir, use_hdf5=False, job_type=job_type, **kwargs)
        return outfile
    fname = 'dataset.nc' if job_type is None else f'{job_type}_dataset.nc'
    outfile = Path(outdir).joinpath(fname)
    # Append when the file already exists, otherwise create it.
    mode = 'a' if outfile.is_file() else 'w'
    log.info(f'Saving dataset to: {outfile.as_posix()}')
    outfile.parent.mkdir(exist_ok=True, parents=True)
    dataset.to_netcdf(outfile.as_posix(), mode=mode)
    return outfile
|
def dataset_to_h5pyfile(hfile: os.PathLike, dataset: xr.Dataset, **kwargs):
    """Append each data variable of `dataset` to `hfile`, extending existing
    resizable datasets along axis 0 and creating new ones as needed.

    `kwargs` are forwarded to `h5py.File.create_dataset`.
    """
    log.info(f'Saving dataset to: {hfile}')
    # Context manager guarantees the file is closed even if a write raises
    # (the original leaked the handle on error).
    with h5py.File(hfile, 'a') as f:
        for (key, val) in dataset.data_vars.items():
            arr = val.values
            if (len(arr) == 0):
                continue
            if (key in list(f.keys())):
                # Grow the existing dataset and write into the new tail.
                shape = (f[key].shape[0] + arr.shape[0])
                f[key].resize(shape, axis=0)
                f[key][(- arr.shape[0]):] = arr
            else:
                # Unlimited along axis 0 so later appends can resize.
                maxshape = (None,)
                if (len(arr.shape) > 1):
                    maxshape = (None, *arr.shape[1:])
                f.create_dataset(key, data=arr, maxshape=maxshape, **kwargs)
|
def dict_from_h5pyfile(hfile: os.PathLike) -> dict:
    """Load every top-level dataset in `hfile` into an in-memory dict.

    BUGFIX: values are materialized (via `[()]`) while the file is still
    open; the original returned h5py.Dataset handles that referenced an
    already-closed file and were unusable.
    """
    with h5py.File(hfile, 'r') as f:
        data = {key: f[key][()] for key in list(f.keys())}
    return data
|
def dataset_from_h5pyfile(hfile: os.PathLike) -> xr.Dataset:
    """Load `hfile` into an xr.Dataset.

    BUGFIX: values are materialized (via `[()]`) while the file is still
    open; the original handed xr.Dataset h5py handles referencing an
    already-closed file.
    """
    with h5py.File(hfile, 'r') as f:
        data = {key: f[key][()] for key in list(f.keys())}
    return xr.Dataset(data)
|
def load_job_data(logdir: os.PathLike, jobtype: str) -> xr.Dataset:
    """Load '<logdir>/<jobtype>/data/<jobtype>_data.h5' as an xr.Dataset."""
    assert (jobtype in {'train', 'eval', 'hmc'})
    datafile = Path(logdir) / f'{jobtype}' / 'data' / f'{jobtype}_data.h5'
    assert datafile.is_file()
    return dataset_from_h5pyfile(datafile)
|
def load_time_data(logdir: os.PathLike, jobtype: str) -> pd.DataFrame:
    """Collect every 'step-timer-<jobtype>' CSV under `logdir` into one
    DataFrame, keyed by discovery order ('0', '1', ...)."""
    assert (jobtype in {'train', 'eval', 'hmc'})
    data = {}
    for idx, fpath in enumerate(Path(logdir).rglob(f'step-timer-{jobtype}')):
        data[str(idx)] = pd.read_csv(fpath)
    return pd.DataFrame(data)
|
def _load_from_dir(logdir: os.PathLike, to_load: str) -> (xr.Dataset | pd.DataFrame):
    """Dispatch loading of a single result type ('train'/'eval'/'hmc' data,
    or 'time'/'timing' step-timer data) from `logdir`.

    Raises:
        ValueError: For any other `to_load` value.
    """
    if (to_load in {'train', 'eval', 'hmc'}):
        return load_job_data(logdir=logdir, jobtype=to_load)
    if (to_load in {'time', 'timing'}):
        # NOTE(review): load_time_data asserts jobtype in {'train','eval','hmc'},
        # so passing 'time'/'timing' through will always trip that assert —
        # confirm the intended jobtype for timing data.
        return load_time_data(logdir, jobtype=to_load)
    raise ValueError('Unexpected argument for `to_load`')
|
def load_from_dir(logdir: os.PathLike, to_load: (str | list[str])) -> dict[(str, xr.Dataset)]:
    """Load one or more result types from `logdir`, keyed by type name.

    Accepts a single type name or a list of them; each must be one of
    'train', 'eval', 'hmc', 'time', 'timing'.
    """
    valid = ('train', 'eval', 'hmc', 'time', 'timing')
    data = {}
    if isinstance(to_load, list):
        # BUGFIX: the old top-level assert rejected every list input, and the
        # loop loaded `to_load` (the whole list) instead of each element `i`.
        for i in to_load:
            assert (i in valid)
            data[i] = _load_from_dir(logdir, i)
    elif isinstance(to_load, str):
        assert (to_load in valid)
        data[to_load] = _load_from_dir(logdir, to_load)
    return data
|
def latvolume_to_str(latvolume: list[int]):
    """Render a lattice volume such as [4, 4, 8] as '4x4x8'."""
    return 'x'.join(map(str, latvolume))
|
def check_nonempty(fpath: os.PathLike) -> bool:
    """Return True iff `fpath` is an existing, non-empty directory."""
    path = Path(fpath)
    if not path.is_dir():
        return False
    return len(os.listdir(path)) > 0
|
def check_jobdir(fpath: os.PathLike) -> bool:
    """True iff `fpath` contains non-empty 'plots', 'data' and 'logs' subdirs."""
    jobdir = Path(fpath)
    return all(
        check_nonempty(jobdir.joinpath(sub))
        for sub in ('plots', 'data', 'logs')
    )
|
def check_if_logdir(fpath: os.PathLike) -> bool:
    """True iff `fpath` looks like a complete logdir: it contains 'train',
    'eval' and 'hmc' entries and each of those directories is non-empty."""
    logdir = Path(fpath)
    # Removed the accidentally-duplicated `os.listdir` call.
    contents = os.listdir(logdir)
    in_contents = (('train' in contents) and ('eval' in contents) and ('hmc' in contents))
    non_empty = (check_nonempty(logdir.joinpath('train')) and check_nonempty(logdir.joinpath('eval')) and check_nonempty(logdir.joinpath('hmc')))
    return (in_contents and non_empty)
|
def check_if_matching_logdir(fpath: os.PathLike, config_str: str) -> bool:
    """True iff `fpath` is a complete logdir whose path contains `config_str`."""
    if not check_if_logdir(fpath):
        return False
    return config_str in Path(fpath).as_posix()
|
def find_logdirs(rootdir: os.PathLike) -> list[Path]:
    """Every `logdir` should contain a `config_tree.log` file."""
    candidates = (Path(p).parent for p in Path(rootdir).rglob('config_tree.log'))
    return [d for d in candidates if check_if_logdir(d)]
|
def _match_beta(logdir, beta: Optional[float]=None) -> bool:
return ((beta is not None) and (f'beta-{beta:.1f}' in Path(logdir).as_posix()))
|
def _match_group(logdir, group: Optional[str]=None) -> bool:
return ((group is not None) and (group in Path(logdir).as_posix()))
|
def _match_nlf(logdir, nlf: Optional[int]=None) -> bool:
return ((nlf is not None) and (f'nlf-{nlf}' in Path(logdir).as_posix()))
|
def _match_merge_directions(logdir, merge_directions: Optional[bool]=None) -> bool:
return ((merge_directions is not None) and (f'merge_directions-{merge_directions}' in Path(logdir).as_posix()))
|
def _match_framework(logdir: os.PathLike, framework: Optional[str]=None) -> bool:
return ((framework is not None) and (framework in Path(logdir).as_posix()))
|
def _match_latvolume(logdir: os.PathLike, latvolume: Optional[list[int]]=None) -> bool:
return ((latvolume is not None) and ('x'.join([str(i) for i in latvolume]) in Path(logdir).as_posix()))
|
def filter_logdirs(logdirs: list, beta: Optional[float]=None, group: Optional[str]=None, nlf: Optional[int]=None, merge_directions: Optional[bool]=None, framework: Optional[str]=None, latvolume: Optional[list[int]]=None) -> list[os.PathLike]:
    """Filter logdirs by criteria: keep any dir matching at least one of the
    provided criteria (criteria left as None never match)."""
    matches = []
    for logdir in logdirs:
        # BUGFIX: append each logdir at most once — previously a dir matching
        # several criteria was appended once per matching criterion.
        if (
            _match_beta(logdir, beta)
            or _match_group(logdir, group)
            or _match_nlf(logdir, nlf)
            or _match_merge_directions(logdir, merge_directions)
            or _match_framework(logdir, framework)
            or _match_latvolume(logdir, latvolume)
        ):
            matches.append(logdir)
    return matches
|
def find_matching_logdirs(rootdir: os.PathLike, beta: Optional[float]=None, group: Optional[str]=None, nlf: Optional[int]=None, merge_directions: Optional[bool]=None, framework: Optional[str]=None, latvolume: Optional[list[int]]=None):
    """Find complete logdirs under `rootdir`, filtered by the given criteria."""
    return filter_logdirs(
        find_logdirs(rootdir),
        beta=beta,
        group=group,
        nlf=nlf,
        merge_directions=merge_directions,
        framework=framework,
        latvolume=latvolume,
    )
|
def find_runs_with_matching_options(config: dict[(str, Any)], rootdir: Optional[os.PathLike]=None) -> list[Path]:
    """Find runs whose saved 'config.yaml' matches every entry of `config`.

    Walks `rootdir` (default: OUTPUTS_DIR) for config.yaml files and keeps
    those where every (key, value) in `config` is present and equal.
    """
    if (rootdir is None):
        rootdir = Path(OUTPUTS_DIR)
    config_files = [
        i.resolve() for i in Path(rootdir).rglob('*.yaml')
        # BUGFIX: `i.is_file` was never called, so the check was always truthy.
        if (i.is_file() and (i.name == 'config.yaml'))
    ]
    matches = []
    for f in config_files:
        fpath = Path(f)
        loaded = OmegaConf.to_container(OmegaConf.load(f), resolve=True)
        assert isinstance(loaded, dict)
        # BUGFIX: the original compared `sum(matches)` to `len(matches)` —
        # the result list against itself — which accepted the first file
        # unconditionally and raised TypeError once a Path was appended.
        if all((key in loaded) and (val == loaded.get(key, None)) for (key, val) in config.items()):
            matches.append(fpath)
    return matches
|
def table_to_dict(table: Table, data: Optional[dict]=None) -> dict:
    """Convert a rich Table's columns into {header: [float(cell), ...]},
    extending `data` in place when it is provided."""
    if data is None:
        return {
            col.header: [float(cell) for cell in list(col.cells)]
            for col in table.columns
        }
    for col in table.columns:
        values = [float(cell) for cell in list(col.cells)]
        if col.header in data:
            data[col.header].extend(values)
        else:
            data[col.header] = values
    return data
|
def save_logs(tables: Optional[dict[(str, Table)]]=None, summaries: Optional[list[str]]=None, job_type: Optional[str]=None, logdir: Optional[os.PathLike]=None, run: Optional[Any]=None, rank: Optional[int]=None) -> None:
    """Dump per-step tables to '<logdir>/<job_type>_table.csv' (and to W&B
    when `run` is given) and append `summaries` to '<logdir>/summaries.txt'."""
    job_type = job_type if job_type is not None else 'job'
    logdir = Path(logdir) if logdir is not None else Path(os.getcwd()).joinpath('logs')
    table_dir = logdir.joinpath('tables')
    tdir = table_dir.joinpath('txt')
    hdir = table_dir.joinpath('html')
    hfile = hdir.joinpath('table.html')
    hfile.parent.mkdir(exist_ok=True, parents=True)
    tfile = tdir.joinpath('table.txt')
    tfile.parent.mkdir(exist_ok=True, parents=True)
    data = {}
    if tables is not None:
        # Merge all tables into one {header: values} dict.
        for idx, table in tables.items():
            if idx == 0:
                data = table_to_dict(table)
            else:
                data = table_to_dict(table, data)
        df = pd.DataFrame.from_dict(data)
        dfile = Path(logdir).joinpath(f'{job_type}_table.csv')
        df.to_csv(dfile.as_posix(), mode='a')
        if run is not None:
            run.log({f'DataFrames/{job_type}': wandb.Table(data=df)})
    if summaries is not None:
        sfile = logdir.joinpath('summaries.txt').as_posix()
        with open(sfile, 'a') as f:
            f.write('\n'.join(summaries))
|
def make_subdirs(basedir: os.PathLike):
    """Ensure 'logs', 'data' and 'plots' subdirs exist under `basedir` and
    return them as {name: Path}."""
    root = Path(basedir)
    assert root.is_dir()
    dirs = {}
    for name in ('logs', 'data', 'plots'):
        sub = root.joinpath(name)
        sub.mkdir(exist_ok=True, parents=True)
        dirs[name] = sub
    return dirs
|
def save_figure(fig: plt.Figure, key: str, outdir: os.PathLike):
    """Write `fig` to '<outdir>/svgs/<key>.svg' and '<outdir>/pngs/<key>.png'
    and return the figure."""
    png_dir = Path(outdir).joinpath('pngs')
    svg_dir = Path(outdir).joinpath('svgs')
    for directory in (png_dir, svg_dir):
        directory.mkdir(parents=True, exist_ok=True)
    fig.savefig(svg_dir.joinpath(f'{key}.svg').as_posix(), transparent=True, bbox_inches='tight')
    fig.savefig(png_dir.joinpath(f'{key}.png').as_posix(), transparent=True, bbox_inches='tight')
    return fig
|
def savefig(fname: str, outdir: os.PathLike, tstamp: Optional[bool]=True):
    """Save the current matplotlib figure under `outdir` as both png and svg,
    optionally suffixing `fname` with a timestamp."""
    outdir = Path(outdir)
    if tstamp:
        stamp = get_timestamp('%Y-%m-%d-%H%M%S')
        fname = f'{fname}-{stamp}'
    print(f'Saving {fname} to {outdir}')
    for ext in {'png', 'svg'}:
        edir = Path(outdir).joinpath(f'{ext}s')
        edir.mkdir(exist_ok=True, parents=True)
        plt.savefig(Path(edir).joinpath(f'{fname}.{ext}'), dpi=450, bbox_inches='tight')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.