repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Hierarchical-Localization | Hierarchical-Localization-master/hloc/localize_inloc.py | import argparse
from pathlib import Path
import numpy as np
import h5py
from scipy.io import loadmat
import torch
from tqdm import tqdm
import pickle
import cv2
import pycolmap
from . import logger
from .utils.parsers import parse_retrieval, names_to_pair
def interpolate_scan(scan, kp):
    """Sample 3D scan coordinates at (sub-pixel) keypoint locations.

    Args:
        scan: HxWxC array of per-pixel 3D coordinates (may contain NaNs).
        kp: Nx2 array of keypoint positions in pixel coordinates.

    Returns:
        A tuple (kp3d, valid) where kp3d is an NxC array of interpolated
        values and valid is a boolean mask of keypoints with no NaN.
    """
    height, width, _ = scan.shape
    # Normalize pixel coordinates to the [-1, 1] range used by grid_sample.
    kp = kp / np.array([[width - 1, height - 1]]) * 2 - 1
    assert np.all(kp > -1) and np.all(kp < 1)
    scan_t = torch.from_numpy(scan).permute(2, 0, 1)[None]
    kp_t = torch.from_numpy(kp)[None, None]

    # Bilinear interpolation first to maximize the points that get a value,
    # then fall back to nearest-neighbor where bilinear produced NaNs.
    def _sample(mode):
        return torch.nn.functional.grid_sample(
            scan_t, kp_t, align_corners=True, mode=mode)[0, :, 0]

    interp_lin = _sample('bilinear')
    interp_nn = _sample('nearest')
    interp = torch.where(torch.isnan(interp_lin), interp_nn, interp_lin)
    valid = ~torch.any(torch.isnan(interp), 0)
    return interp.T.numpy(), valid.numpy()
def get_scan_pose(dataset_dir, rpath):
    """Load the 4x4 alignment transform for the scan that produced `rpath`.

    The floor name, scan id, and building prefix are recovered from the
    database image path, and the transform is read from the matching
    alignment text file (file rows 8-11 hold the matrix after GICP).
    """
    parts = rpath.split('/')
    floor_name, scan_id, image_name = parts[-3], parts[-2], parts[-1]
    building_name = image_name[:3]
    path = Path(
        dataset_dir, 'database/alignments', floor_name,
        f'transformations/{building_name}_trans_{scan_id}.txt')
    with open(path) as f:
        raw_lines = f.readlines()
    # Rows 7-10 (0-based) of the file contain the aligned 4x4 matrix.
    P_after_GICP = np.array(
        [np.fromstring(raw_lines[idx], sep=' ') for idx in range(7, 11)])
    return P_after_GICP
def pose_from_cluster(dataset_dir, q, retrieved, feature_file, match_file,
                      skip=None):
    """Estimate the absolute pose of query `q` from a cluster of retrieved
    database images.

    2D-2D matches between the query and each retrieved image are lifted
    to 3D using the corresponding RGB-D scan, transformed into the global
    frame, and fed to a PnP+RANSAC solver.

    Args:
        dataset_dir: root directory of the dataset.
        q: relative path of the query image.
        retrieved: list of retrieved database image paths.
        feature_file: HDF5 file with a 'keypoints' dataset per image.
        match_file: HDF5 file with a 'matches0' dataset per pair.
        skip: if set, ignore retrieved images with fewer matches than this.

    Returns:
        (ret, all_mkpq, all_mkpr, all_mkp3d, all_indices, num_matches):
        the pycolmap estimation result (with the camera config added under
        'cfg'), the stacked query/reference 2D keypoints, their 3D points,
        the per-point retrieved-image index, and the total match count.
    """
    height, width = cv2.imread(str(dataset_dir / q)).shape[:2]
    cx = .5 * width
    cy = .5 * height
    # Fixed focal length; presumably derived from the query camera's EXIF
    # (4032 px width, 28 mm equivalent on a 36 mm sensor) — TODO confirm.
    focal_length = 4032. * 28. / 36.
    all_mkpq = []
    all_mkpr = []
    all_mkp3d = []
    all_indices = []
    kpq = feature_file[q]['keypoints'].__array__()
    num_matches = 0
    for i, r in enumerate(retrieved):
        kpr = feature_file[r]['keypoints'].__array__()
        pair = names_to_pair(q, r)
        m = match_file[pair]['matches0'].__array__()
        # Keep only query keypoints that received a match (-1 = unmatched).
        v = (m > -1)
        if skip and (np.count_nonzero(v) < skip):
            continue
        mkpq, mkpr = kpq[v], kpr[m[v]]
        num_matches += len(mkpq)
        # Per-pixel 3D coordinates of the reference scan.
        scan_r = loadmat(Path(dataset_dir, r + '.mat'))["XYZcut"]
        mkp3d, valid = interpolate_scan(scan_r, mkpr)
        # Transform scan-local 3D points into the global frame.
        Tr = get_scan_pose(dataset_dir, r)
        mkp3d = (Tr[:3, :3] @ mkp3d.T + Tr[:3, -1:]).T
        all_mkpq.append(mkpq[valid])
        all_mkpr.append(mkpr[valid])
        all_mkp3d.append(mkp3d[valid])
        all_indices.append(np.full(np.count_nonzero(valid), i))
    all_mkpq = np.concatenate(all_mkpq, 0)
    all_mkpr = np.concatenate(all_mkpr, 0)
    all_mkp3d = np.concatenate(all_mkp3d, 0)
    all_indices = np.concatenate(all_indices, 0)
    cfg = {
        'model': 'SIMPLE_PINHOLE',
        'width': width,
        'height': height,
        'params': [focal_length, cx, cy]
    }
    # PnP + RANSAC with a 48 px reprojection error threshold.
    ret = pycolmap.absolute_pose_estimation(
        all_mkpq, all_mkp3d, cfg, 48.00)
    ret['cfg'] = cfg
    return ret, all_mkpq, all_mkpr, all_mkp3d, all_indices, num_matches
def main(dataset_dir, retrieval, features, matches, results,
         skip_matches=None):
    """Localize every query listed in the retrieval file.

    Writes one pose per line ('name qvec tvec') to `results` and a pickle
    with detailed per-query logs to `<results>_logs.pkl`.
    """
    assert retrieval.exists(), retrieval
    assert features.exists(), features
    assert matches.exists(), matches
    retrieval_dict = parse_retrieval(retrieval)
    queries = list(retrieval_dict.keys())
    feature_file = h5py.File(features, 'r', libver='latest')
    match_file = h5py.File(matches, 'r', libver='latest')
    poses = {}
    logs = {
        'features': features,
        'matches': matches,
        'retrieval': retrieval,
        'loc': {},
    }
    logger.info('Starting localization...')
    for q in tqdm(queries):
        db = retrieval_dict[q]
        ret, mkpq, mkpr, mkp3d, indices, num_matches = pose_from_cluster(
            dataset_dir, q, db, feature_file, match_file, skip_matches)
        poses[q] = (ret['qvec'], ret['tvec'])
        # Keep everything needed to inspect/debug this query later.
        logs['loc'][q] = {
            'db': db,
            'PnP_ret': ret,
            'keypoints_query': mkpq,
            'keypoints_db': mkpr,
            '3d_points': mkp3d,
            'indices_db': indices,
            'num_matches': num_matches,
        }
    logger.info(f'Writing poses to {results}...')
    with open(results, 'w') as f:
        for q in queries:
            qvec, tvec = poses[q]
            qvec = ' '.join(map(str, qvec))
            tvec = ' '.join(map(str, tvec))
            # Only the basename is expected by the evaluation format.
            name = q.split("/")[-1]
            f.write(f'{name} {qvec} {tvec}\n')
    logs_path = f'{results}_logs.pkl'
    logger.info(f'Writing logs to {logs_path}...')
    with open(logs_path, 'wb') as f:
        pickle.dump(logs, f)
    logger.info('Done!')
if __name__ == '__main__':
    # CLI entry point: all path arguments are required except skip_matches.
    parser = argparse.ArgumentParser()
    for arg_name in ('dataset_dir', 'retrieval', 'features',
                     'matches', 'results'):
        parser.add_argument(f'--{arg_name}', type=Path, required=True)
    parser.add_argument('--skip_matches', type=int)
    args = parser.parse_args()
    main(**vars(args))
| 5,545 | 30.333333 | 79 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/match_features.py | import argparse
from typing import Union, Optional, Dict, List, Tuple
from pathlib import Path
import pprint
from queue import Queue
from threading import Thread
from functools import partial
from tqdm import tqdm
import h5py
import torch
from . import matchers, logger
from .utils.base_model import dynamic_load
from .utils.parsers import names_to_pair, names_to_pair_old, parse_retrieval
'''
A set of standard configurations that can be directly selected from the command
line using their name. Each is a dictionary with the following entries:
- output: the name of the match file that will be generated.
- model: the model configuration, as passed to a feature matcher.
'''
# Each entry maps a CLI-selectable name to a matcher configuration
# (see the module docstring above for the meaning of the fields).
confs = {
    # LightGlue with SuperPoint local features.
    'superpoint+lightglue': {
        'output': 'matches-superpoint-lightglue',
        'model': {
            'name': 'lightglue',
            'features': 'superpoint',
        },
    },
    # LightGlue with DISK local features.
    'disk+lightglue': {
        'output': 'matches-disk-lightglue',
        'model': {
            'name': 'lightglue',
            'features': 'disk',
        },
    },
    # SuperGlue with outdoor weights, full Sinkhorn iterations.
    'superglue': {
        'output': 'matches-superglue',
        'model': {
            'name': 'superglue',
            'weights': 'outdoor',
            'sinkhorn_iterations': 50,
        },
    },
    # Faster SuperGlue variant with only 5 Sinkhorn iterations.
    'superglue-fast': {
        'output': 'matches-superglue-it5',
        'model': {
            'name': 'superglue',
            'weights': 'outdoor',
            'sinkhorn_iterations': 5,
        },
    },
    # Mutual nearest-neighbor matching with a distance threshold.
    'NN-superpoint': {
        'output': 'matches-NN-mutual-dist.7',
        'model': {
            'name': 'nearest_neighbor',
            'do_mutual_check': True,
            'distance_threshold': 0.7,
        },
    },
    # Mutual nearest-neighbor matching with Lowe's ratio test.
    'NN-ratio': {
        'output': 'matches-NN-mutual-ratio.8',
        'model': {
            'name': 'nearest_neighbor',
            'do_mutual_check': True,
            'ratio_threshold': 0.8,
        }
    },
    # Plain mutual nearest-neighbor matching.
    'NN-mutual': {
        'output': 'matches-NN-mutual',
        'model': {
            'name': 'nearest_neighbor',
            'do_mutual_check': True,
        },
    },
    'adalam': {
        'output': 'matches-adalam',
        'model': {
            'name': 'adalam'
        },
    }
}
class WorkQueue():
    """A pool of worker threads consuming items from a bounded queue.

    `work_fn` is invoked on every item passed to `put`; `join` sends one
    sentinel per worker and waits until all of them have exited.
    """

    def __init__(self, work_fn, num_threads=1):
        self.queue = Queue(num_threads)
        self.threads = [Thread(target=self.thread_fn, args=(work_fn,))
                        for _ in range(num_threads)]
        for worker in self.threads:
            worker.start()

    def join(self):
        # One None sentinel per worker triggers its shutdown.
        for _ in self.threads:
            self.queue.put(None)
        for worker in self.threads:
            worker.join()

    def thread_fn(self, work_fn):
        # Consume items until the shutdown sentinel is received.
        while (item := self.queue.get()) is not None:
            work_fn(item)

    def put(self, data):
        self.queue.put(data)
class FeaturePairsDataset(torch.utils.data.Dataset):
    """Dataset yielding, for each pair, the stored features of both images.

    Feature groups are read from two HDF5 files (query and reference) and
    suffixed with '0'/'1' respectively, following the matcher convention.
    """

    def __init__(self, pairs, feature_path_q, feature_path_r):
        self.pairs = pairs
        self.feature_path_q = feature_path_q
        self.feature_path_r = feature_path_r

    @staticmethod
    def _load_features(path, name, suffix, data):
        with h5py.File(path, 'r') as fd:
            grp = fd[name]
            for key, val in grp.items():
                data[key + suffix] = torch.from_numpy(val.__array__()).float()
            # Some matchers might expect an image but only use its size.
            data['image' + suffix] = torch.empty(
                (1,) + tuple(grp['image_size'])[::-1])

    def __getitem__(self, idx):
        name0, name1 = self.pairs[idx]
        data = {}
        self._load_features(self.feature_path_q, name0, '0', data)
        self._load_features(self.feature_path_r, name1, '1', data)
        return data

    def __len__(self):
        return len(self.pairs)
def writer_fn(inp, match_path):
    """Append one pair's matches (and scores, when present) to the match
    file, replacing any stale group for that pair."""
    pair, pred = inp
    with h5py.File(str(match_path), 'a', libver='latest') as fd:
        if pair in fd:
            del fd[pair]
        grp = fd.create_group(pair)
        grp.create_dataset(
            'matches0', data=pred['matches0'][0].cpu().short().numpy())
        if 'matching_scores0' in pred:
            grp.create_dataset(
                'matching_scores0',
                data=pred['matching_scores0'][0].cpu().half().numpy())
def main(conf: Dict,
         pairs: Path, features: Union[Path, str],
         export_dir: Optional[Path] = None,
         matches: Optional[Path] = None,
         features_ref: Optional[Path] = None,
         overwrite: bool = False) -> Path:
    """Match local features over the given pairs and return the match path.

    `features` may be an explicit feature file (then `matches` must be
    given too) or a configuration name from which the paths are derived
    inside `export_dir`.
    """
    if isinstance(features, Path) or Path(features).exists():
        # Explicit feature file: the match path must also be explicit.
        if matches is None:
            raise ValueError('Either provide both features and matches as Path'
                             ' or both as names.')
        features_q = features
    else:
        # Name-based outputs: derive both paths inside export_dir.
        if export_dir is None:
            raise ValueError('Provide an export_dir if features is not'
                             f' a file path: {features}.')
        features_q = Path(export_dir, features+'.h5')
        if matches is None:
            matches = Path(
                export_dir, f'{features}_{conf["output"]}_{pairs.stem}.h5')

    features_ref = features_q if features_ref is None else features_ref

    match_from_paths(conf, pairs, matches, features_q, features_ref, overwrite)
    return matches
def find_unique_new_pairs(pairs_all: List[Tuple[str]], match_path: Path = None):
    '''Avoid to recompute duplicates to save time.'''
    # Deduplicate unordered pairs, keeping the first orientation seen.
    pairs = set()
    for i, j in pairs_all:
        if (j, i) not in pairs:
            pairs.add((i, j))
    pairs = list(pairs)
    if match_path is None or not match_path.exists():
        return pairs
    # Drop pairs already stored in the match file under any naming scheme
    # or orientation.
    with h5py.File(str(match_path), 'r', libver='latest') as fd:
        def _already_matched(i, j):
            return any(key in fd for key in (
                names_to_pair(i, j), names_to_pair(j, i),
                names_to_pair_old(i, j), names_to_pair_old(j, i)))
        return [(i, j) for i, j in pairs if not _already_matched(i, j)]
@torch.no_grad()
def match_from_paths(conf: Dict,
                     pairs_path: Path,
                     match_path: Path,
                     feature_path_q: Path,
                     feature_path_ref: Path,
                     overwrite: bool = False) -> Path:
    """Match precomputed features for all pairs listed in `pairs_path`.

    Loads the matcher named in `conf['model']`, feeds it query/reference
    feature pairs, and writes 'matches0' (and scores) to `match_path`
    through a background writer queue. Pairs already present in the match
    file are skipped unless `overwrite` is set.
    """
    logger.info('Matching local features with configuration:'
                f'\n{pprint.pformat(conf)}')
    if not feature_path_q.exists():
        raise FileNotFoundError(f'Query feature file {feature_path_q}.')
    if not feature_path_ref.exists():
        raise FileNotFoundError(f'Reference feature file {feature_path_ref}.')
    match_path.parent.mkdir(exist_ok=True, parents=True)
    assert pairs_path.exists(), pairs_path
    pairs = parse_retrieval(pairs_path)
    pairs = [(q, r) for q, rs in pairs.items() for r in rs]
    pairs = find_unique_new_pairs(pairs, None if overwrite else match_path)
    if len(pairs) == 0:
        logger.info('Skipping the matching.')
        return
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Model = dynamic_load(matchers, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)
    dataset = FeaturePairsDataset(pairs, feature_path_q, feature_path_ref)
    loader = torch.utils.data.DataLoader(
        dataset, num_workers=5, batch_size=1, shuffle=False, pin_memory=True)
    # HDF5 writes happen on worker threads so matching is not blocked.
    writer_queue = WorkQueue(partial(writer_fn, match_path=match_path), 5)
    for idx, data in enumerate(tqdm(loader, smoothing=.1)):
        # 'image*' entries are dummy size-only tensors; keep them on CPU.
        data = {k: v if k.startswith('image')
                else v.to(device, non_blocking=True) for k, v in data.items()}
        pred = model(data)
        pair = names_to_pair(*pairs[idx])
        writer_queue.put((pair, pred))
    writer_queue.join()
    logger.info('Finished exporting matches.')
if __name__ == '__main__':
    # CLI entry point; the matcher is selected by name from `confs`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pairs', type=Path, required=True)
    parser.add_argument('--export_dir', type=Path)
    parser.add_argument('--features', type=str,
                        default='feats-superpoint-n4096-r1024')
    parser.add_argument('--matches', type=Path)
    parser.add_argument('--conf', type=str, default='superglue',
                        choices=list(confs.keys()))
    cli_args = parser.parse_args()
    main(confs[cli_args.conf], cli_args.pairs, cli_args.features,
         cli_args.export_dir)
| 8,514 | 32.523622 | 80 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/match_dense.py | from tqdm import tqdm
import numpy as np
import h5py
import torch
from pathlib import Path
from typing import Dict, Iterable, Optional, List, Tuple, Union, Set
import pprint
import argparse
import torchvision.transforms.functional as F
from types import SimpleNamespace
from collections import defaultdict
from scipy.spatial import KDTree
from collections import Counter
from itertools import chain
from . import matchers, logger
from .utils.base_model import dynamic_load
from .utils.parsers import parse_retrieval, names_to_pair
from .match_features import find_unique_new_pairs
from .extract_features import read_image, resize_image
from .utils.io import list_h5_names
# Default usage:
# dense_conf = confs['loftr']
# features, matches = main(dense_conf, pairs, images, export_dir=outputs)
# Use SuperPoint keypoints as anchors:
# feature_conf = extract_features.confs['superpoint_aachen']
# features_sp = extract_features.main(feature_conf, images)
# features, matches = main(dense_conf, pairs, images,
# export_dir=outputs,
# features_ref=features_sp)
# Localization:
# loc_features, loc_matches = main(matcher_conf, loc_pairs,
# images, export_dir=outputs, features_ref=features, max_kps=None)
# Semi-dense matcher configurations selectable by name.
confs = {
    # Best quality but loads of points. Only use for small scenes
    'loftr': {
        'output': 'matches-loftr',
        'model': {
            'name': 'loftr',
            'weights': 'outdoor'
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1024,
            'dfactor': 8
        },
        'max_error': 1,  # max error for assigned keypoints (in px)
        'cell_size': 1,  # size of quantization patch (max 1 kp/patch)
    },
    # Semi-scalable loftr which limits detected keypoints
    'loftr_aachen': {
        'output': 'matches-loftr_aachen',
        'model': {
            'name': 'loftr',
            'weights': 'outdoor'
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1024,
            'dfactor': 8
        },
        'max_error': 2,  # max error for assigned keypoints (in px)
        'cell_size': 8,  # size of quantization patch (max 1 kp/patch)
    },
    # Use for matching superpoint feats with loftr
    'loftr_superpoint': {
        # NOTE(review): output name collides with 'loftr_aachen' above, so
        # files from the two configs would overwrite each other — confirm
        # this is intentional.
        'output': 'matches-loftr_aachen',
        'model': {
            'name': 'loftr',
            'weights': 'outdoor'
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1024,
            'dfactor': 8
        },
        'max_error': 4,  # max error for assigned keypoints (in px)
        'cell_size': 4,  # size of quantization patch (max 1 kp/patch)
    },
}
def to_cpts(kpts, ps):
    """Quantize keypoints to a grid of cell size `ps` (no-op when ps <= 0)
    and return them as a list of hashable tuples."""
    if ps > 0.0:
        kpts = np.round(np.round((kpts + 0.5) / ps) * ps - 0.5, 2)
    return list(map(tuple, kpts))
def assign_keypoints(kpts: np.ndarray,
                     other_cpts: Union[List[Tuple], np.ndarray],
                     max_error: float,
                     update: bool = False,
                     ref_bins: Optional[List[Counter]] = None,
                     scores: Optional[np.ndarray] = None,
                     cell_size: Optional[int] = None):
    """Assign each keypoint in `kpts` to an entry of `other_cpts`.

    Without `update`, this is a pure nearest-neighbor lookup: keypoints
    farther than `max_error` from every entry get id -1.

    With `update`, keypoints are quantized to a grid of cell size
    `max(cell_size, max_error)`; unseen cells are APPENDED to `other_cpts`
    in place (and, when given, a fresh Counter to `ref_bins`), and each
    keypoint votes — weighted by `scores` — for its finer `max_error`-sized
    bin inside the assigned cell.

    Returns an array of assigned ids (one per keypoint).
    """
    if not update:
        # Without update this is just a NN search
        dist, kpt_ids = KDTree(np.array(other_cpts)).query(kpts)
        valid = (dist <= max_error)
        kpt_ids[~valid] = -1
        return kpt_ids
    else:
        ps = cell_size if cell_size is not None else max_error
        # The quantization cell can never be finer than the error radius.
        ps = max(ps, max_error)
        # With update we quantize and bin (optionally)
        assert isinstance(other_cpts, list)
        kpt_ids = []
        cpts = to_cpts(kpts, ps)
        bpts = to_cpts(kpts, int(max_error))
        cp_to_id = {val: i for i, val in enumerate(other_cpts)}
        for i, (cpt, bpt) in enumerate(zip(cpts, bpts)):
            try:
                kid = cp_to_id[cpt]
            except KeyError:
                # New cell: register it and mutate other_cpts/ref_bins
                # in place so the caller sees the extension.
                kid = len(cp_to_id)
                cp_to_id[cpt] = kid
                other_cpts.append(cpt)
                if ref_bins is not None:
                    ref_bins.append(Counter())
            if ref_bins is not None:
                # Default vote weight is 1 when no scores are provided.
                score = scores[i] if scores is not None else 1
                ref_bins[cp_to_id[cpt]][bpt] += score
            kpt_ids.append(kid)
        return np.array(kpt_ids)
def get_grouped_ids(array):
    """Group the indices of `array` by value.

    Returns a list of index arrays, one per distinct value (in ascending
    value order); indices of duplicates end up in the same group.
    """
    order = np.argsort(array)
    _, first_ids, _ = np.unique(array[order], return_counts=True,
                                return_index=True)
    return np.split(order, first_ids[1:])
def get_unique_matches(match_ids, scores):
    """Enforce one-to-one matching by score.

    For every keypoint id that appears multiple times in a column of
    `match_ids`, only its highest-scoring row can survive; a row is kept
    only if it wins on both columns.

    Args:
        match_ids: Nx2 array of (kpt_id0, kpt_id1) assignments.
        scores: length-N array of match scores.

    Returns:
        The filtered (match_ids, scores).
    """
    if len(match_ids.shape) == 1:
        return [0]

    isets1 = get_grouped_ids(match_ids[:, 0])
    # BUG FIX: the second grouping must be over the *second* column;
    # grouping column 0 twice made the intersection below a no-op for
    # n-to-1 matches on image 1, so they were never removed.
    isets2 = get_grouped_ids(match_ids[:, 1])
    uid1s = [ids[scores[ids].argmax()] for ids in isets1 if len(ids) > 0]
    uid2s = [ids[scores[ids].argmax()] for ids in isets2 if len(ids) > 0]
    # Keep only rows that are the best match on both sides.
    uids = list(set(uid1s).intersection(uid2s))
    return match_ids[uids], scores[uids]
def matches_to_matches0(matches, scores):
    """Convert Nx2 match pairs into hloc's dense `matches0` format.

    Entry i of the returned array holds the matched keypoint id in
    image 1 for keypoint i of image 0, or -1 when unmatched; scores are
    returned as float16 with 0 for unmatched keypoints.
    """
    if len(matches) == 0:
        return np.zeros(0, dtype=np.int32), np.zeros(0, dtype=np.float16)
    n_kps0 = np.max(matches[:, 0]) + 1
    matches0 = np.full((n_kps0,), -1.0)
    scores0 = np.zeros((n_kps0,))
    matches0[matches[:, 0]] = matches[:, 1]
    scores0[matches[:, 0]] = scores
    return matches0.astype(np.int32), scores0.astype(np.float16)
def kpids_to_matches0(kpt_ids0, kpt_ids1, scores):
    """Build (`matches0`, `matching_scores0`) from per-keypoint assignment
    ids, dropping unassigned (-1) entries and n-to-1 duplicates."""
    valid = (kpt_ids0 != -1) & (kpt_ids1 != -1)
    matches = np.stack([kpt_ids0[valid], kpt_ids1[valid]], -1).reshape(-1, 2)
    scores = scores[valid]

    # Enforce one-to-one matching before the format conversion.
    matches, scores = get_unique_matches(matches, scores)
    return matches_to_matches0(matches, scores)
def scale_keypoints(kpts, scale):
    """Rescale a tensor of keypoints by `scale` (per-dimension or scalar).

    The multiplication happens in place; a scale of exactly 1 leaves the
    tensor untouched.
    """
    needs_scaling = np.any(scale != 1.0)
    if needs_scaling:
        kpts *= kpts.new_tensor(scale)
    return kpts
class ImagePairDataset(torch.utils.data.Dataset):
    """Dataset yielding preprocessed image pairs for dense matching.

    Images are optionally resized to `resize_max`, converted to CHW float
    tensors in [0, 1], and cropped/resized so both sides are divisible by
    `dfactor`. Each item also carries the scale that maps keypoints from
    the preprocessed image back to the original resolution.
    """
    default_conf = {
        'grayscale': True,
        'resize_max': 1024,
        'dfactor': 8,
        'cache_images': False,
    }

    def __init__(self, image_dir, conf, pairs):
        self.image_dir = image_dir
        self.conf = conf = SimpleNamespace(**{**self.default_conf, **conf})
        self.pairs = pairs
        if self.conf.cache_images:
            image_names = set(sum(pairs, ()))  # unique image names in pairs
            logger.info(
                f'Loading and caching {len(image_names)} unique images.')
            self.images = {}
            self.scales = {}
            for name in tqdm(image_names):
                image = read_image(self.image_dir / name, self.conf.grayscale)
                self.images[name], self.scales[name] = self.preprocess(image)

    def preprocess(self, image: np.ndarray):
        """Resize and tensorize one image; return (tensor, scale) where
        scale maps (x, y) in the output back to the original image."""
        image = image.astype(np.float32, copy=False)
        size = image.shape[:2][::-1]  # (W, H) of the original image
        scale = np.array([1.0, 1.0])
        if self.conf.resize_max:
            scale = self.conf.resize_max / max(size)
            # Only ever downscale; small images are left untouched here.
            if scale < 1.0:
                size_new = tuple(int(round(x*scale)) for x in size)
                image = resize_image(image, size_new, 'cv2_area')
                scale = np.array(size) / np.array(size_new)
        if self.conf.grayscale:
            assert image.ndim == 2, image.shape
            image = image[None]
        else:
            image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
        image = torch.from_numpy(image / 255.0).float()
        # assure that the size is divisible by dfactor
        size_new = tuple(map(
            lambda x: int(x // self.conf.dfactor * self.conf.dfactor),
            image.shape[-2:]))
        image = F.resize(image, size=size_new)
        # Final scale: original (W, H) over the dfactor-aligned (W, H);
        # this supersedes the intermediate scale computed above.
        scale = np.array(size) / np.array(size_new)[::-1]
        return image, scale

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, idx):
        name0, name1 = self.pairs[idx]
        if self.conf.cache_images:
            image0, scale0 = self.images[name0], self.scales[name0]
            image1, scale1 = self.images[name1], self.scales[name1]
        else:
            image0 = read_image(self.image_dir / name0, self.conf.grayscale)
            image1 = read_image(self.image_dir / name1, self.conf.grayscale)
            image0, scale0 = self.preprocess(image0)
            image1, scale1 = self.preprocess(image1)
        return image0, image1, scale0, scale1, name0, name1
@torch.no_grad()
def match_dense(conf: Dict,
                pairs: List[Tuple[str, str]],
                image_dir: Path,
                match_path: Path,  # out
                existing_refs: Optional[List] = []):
    """Run the semi-dense matcher on all image pairs.

    For each pair, the raw keypoints of both images (rescaled to the
    original image resolution) and their match scores are written to
    `match_path` under the pair's group.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Model = dynamic_load(matchers, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)
    dataset = ImagePairDataset(image_dir, conf["preprocessing"], pairs)
    loader = torch.utils.data.DataLoader(
        dataset, num_workers=16, batch_size=1, shuffle=False)
    logger.info("Performing dense matching...")
    with h5py.File(str(match_path), 'a') as fd:
        for data in tqdm(loader, smoothing=.1):
            # load image-pair data
            image0, image1, scale0, scale1, (name0,), (name1,) = data
            scale0, scale1 = scale0[0].numpy(), scale1[0].numpy()
            image0, image1 = image0.to(device), image1.to(device)
            # match semi-dense
            # for consistency with pairs_from_*: refine kpts of image0
            if name0 in existing_refs:
                # special case: flip to enable refinement in query image
                pred = model({'image0': image1, 'image1': image0})
                pred = {**pred,
                        'keypoints0': pred['keypoints1'],
                        'keypoints1': pred['keypoints0']}
            else:
                # usual case
                pred = model({'image0': image0, 'image1': image1})
            # Rescale keypoints and move to cpu
            # The +/-0.5 shift converts between corner- and center-based
            # pixel coordinates around the rescaling.
            kpts0, kpts1 = pred['keypoints0'], pred['keypoints1']
            kpts0 = scale_keypoints(kpts0 + 0.5, scale0) - 0.5
            kpts1 = scale_keypoints(kpts1 + 0.5, scale1) - 0.5
            kpts0 = kpts0.cpu().numpy()
            kpts1 = kpts1.cpu().numpy()
            scores = pred['scores'].cpu().numpy()
            # Write matches and matching scores in hloc format
            pair = names_to_pair(name0, name1)
            if pair in fd:
                del fd[pair]
            grp = fd.create_group(pair)
            # Write dense matching output
            grp.create_dataset('keypoints0', data=kpts0)
            grp.create_dataset('keypoints1', data=kpts1)
            grp.create_dataset('scores', data=scores)
    del model, loader
# default: quantize all!
def load_keypoints(conf: Dict,
                   feature_paths_refs: List[Path],
                   quantize: Optional[set] = None):
    """Load reference keypoints from a list of feature files.

    Images named in `quantize` (default: all) have their keypoints binned
    through `assign_keypoints` so they can later anchor aggregated dense
    matches; other images get their keypoint arrays stored verbatim.

    Returns:
        (cpdict, bindict): per-image keypoints (list of cells or fixed
        np.ndarray) and per-image bin Counters.
    """
    name2ref = {n: i for i, p in enumerate(feature_paths_refs)
                for n in list_h5_names(p)}
    existing_refs = set(name2ref.keys())
    if quantize is None:
        quantize = existing_refs  # quantize all
    if len(existing_refs) > 0:
        logger.info(f'Loading keypoints from {len(existing_refs)} images.')
    # Load query keypoints
    cpdict = defaultdict(list)
    bindict = defaultdict(list)
    for name in existing_refs:
        with h5py.File(str(feature_paths_refs[name2ref[name]]), 'r') as fd:
            kps = fd[name]['keypoints'].__array__()
            if name not in quantize:
                # Fixed keypoints: stored as-is, never extended later.
                cpdict[name] = kps
            else:
                if 'scores' in fd[name].keys():
                    kp_scores = fd[name]['scores'].__array__()
                else:
                    # we set the score to 1.0 if not provided
                    # increase for more weight on reference keypoints for
                    # stronger anchoring
                    kp_scores = \
                        [1.0 for _ in range(kps.shape[0])]
                # bin existing keypoints of reference images for association
                assign_keypoints(
                    kps, cpdict[name], conf['max_error'], True, bindict[name],
                    kp_scores, conf['cell_size'])
    return cpdict, bindict
def aggregate_matches(
        conf: Dict,
        pairs: List[Tuple[str, str]],
        match_path: Path,
        feature_path: Path,
        required_queries: Optional[Set[str]] = None,
        max_kps: Optional[int] = None,
        cpdict: Dict[str, Iterable] = defaultdict(list),
        bindict: Dict[str, List[Counter]] = defaultdict(list)):
    """Quantize raw dense matches into per-image keypoints and matches.

    For every pair, the raw keypoints are assigned to quantization cells
    (extending `cpdict`/`bindict` in place for images that still need
    features), 'matches0'/'matching_scores0' are written back to the
    match file, and — once an image's last pair is processed — its binned
    cells are converted to final keypoints and stored in `feature_path`.

    NOTE(review): `cpdict`/`bindict` use mutable default arguments that
    are shared across calls; callers in this file always pass them
    explicitly, but confirm no external caller relies on the defaults.

    Returns the updated `cpdict`.
    """
    if required_queries is None:
        required_queries = set(sum(pairs, ()))
    # default: do not overwrite existing features in feature_path!
    required_queries -= set(list_h5_names(feature_path))
    # if an entry in cpdict is provided as np.ndarray we assume it is fixed
    required_queries -= set(
        [k for k, v in cpdict.items() if isinstance(v, np.ndarray)])
    # sort pairs for reduced RAM
    pairs_per_q = Counter(list(chain(*pairs)))
    pairs_score = [min(pairs_per_q[i], pairs_per_q[j]) for i, j in pairs]
    pairs = [p for _, p in sorted(zip(pairs_score, pairs))]
    if len(required_queries) > 0:
        logger.info(f'Aggregating keypoints for {len(required_queries)} images.')
    n_kps = 0
    with h5py.File(str(match_path), 'a') as fd:
        for name0, name1 in tqdm(pairs, smoothing=.1):
            pair = names_to_pair(name0, name1)
            grp = fd[pair]
            kpts0 = grp['keypoints0'].__array__()
            kpts1 = grp['keypoints1'].__array__()
            scores = grp['scores'].__array__()
            # Aggregate local features
            update0 = name0 in required_queries
            update1 = name1 in required_queries
            # in localization we do not want to bin the query kp
            # assumes that the query is name0!
            if update0 and not update1 and max_kps is None:
                max_error0 = cell_size0 = 0.0
            else:
                max_error0 = conf['max_error']
                cell_size0 = conf['cell_size']
            # Get match ids and extend query keypoints (cpdict)
            mkp_ids0 = assign_keypoints(kpts0, cpdict[name0], max_error0,
                                        update0, bindict[name0], scores,
                                        cell_size0)
            mkp_ids1 = assign_keypoints(kpts1, cpdict[name1], conf['max_error'],
                                        update1, bindict[name1], scores,
                                        conf['cell_size'])
            # Build matches from assignments
            matches0, scores0 = kpids_to_matches0(mkp_ids0, mkp_ids1, scores)
            assert kpts0.shape[0] == scores.shape[0]
            grp.create_dataset('matches0', data=matches0)
            grp.create_dataset('matching_scores0', data=scores0)
            # Convert bins to kps if finished, and store them
            for name in (name0, name1):
                pairs_per_q[name] -= 1
                if pairs_per_q[name] > 0 or name not in required_queries:
                    continue
                # Each cell's final keypoint is its highest-voted bin.
                kp_score = [c.most_common(1)[0][1] for c in bindict[name]]
                cpdict[name] = [c.most_common(1)[0][0] for c in bindict[name]]
                cpdict[name] = np.array(cpdict[name], dtype=np.float32)
                # Select top-k query kps by score (reassign matches later)
                if max_kps:
                    top_k = min(max_kps, cpdict[name].shape[0])
                    top_k = np.argsort(kp_score)[::-1][:top_k]
                    cpdict[name] = cpdict[name][top_k]
                    kp_score = np.array(kp_score)[top_k]
                # Write query keypoints
                with h5py.File(feature_path, 'a') as kfd:
                    if name in kfd:
                        del kfd[name]
                    kgrp = kfd.create_group(name)
                    kgrp.create_dataset('keypoints', data=cpdict[name])
                    kgrp.create_dataset('score', data=kp_score)
                    n_kps += cpdict[name].shape[0]
                # Bins are no longer needed once keypoints are finalized.
                del bindict[name]
    if len(required_queries) > 0:
        avg_kp_per_image = round(n_kps / len(required_queries), 1)
        logger.info(f'Finished assignment, found {avg_kp_per_image} '
                    f'keypoints/image (avg.), total {n_kps}.')
    return cpdict
def assign_matches(
        pairs: List[Tuple[str, str]],
        match_path: Path,
        keypoints: Union[List[Path], Dict[str, np.array]],
        max_error: float):
    """Re-assign the raw dense matches of every pair to a fixed keypoint
    set by nearest-neighbor search, overwriting 'matches0' and
    'matching_scores0' in the match file.

    Args:
        pairs: list of (name0, name1) image pairs.
        match_path: HDF5 file holding raw 'keypoints0/1' and 'scores'.
        keypoints: final keypoints, either {name: Nx2 array} or a list of
            feature files to load them from.
        max_error: maximum assignment distance (in px).
    """
    if isinstance(keypoints, list):
        # BUG FIX: `load_keypoints` takes `quantize`, not `kpts_as_bin`
        # (the old keyword raised a TypeError), and it returns a
        # (keypoints, bins) tuple. An empty `quantize` set loads every
        # image's keypoints verbatim as fixed arrays.
        keypoints, _ = load_keypoints({}, keypoints, quantize=set())
    assert len(set(sum(pairs, ())) - set(keypoints.keys())) == 0
    with h5py.File(str(match_path), 'a') as fd:
        for name0, name1 in tqdm(pairs):
            pair = names_to_pair(name0, name1)
            grp = fd[pair]
            kpts0 = grp['keypoints0'].__array__()
            kpts1 = grp['keypoints1'].__array__()
            scores = grp['scores'].__array__()
            # NN search across cell boundaries
            mkp_ids0 = assign_keypoints(kpts0, keypoints[name0], max_error)
            mkp_ids1 = assign_keypoints(kpts1, keypoints[name1], max_error)
            matches0, scores0 = kpids_to_matches0(mkp_ids0, mkp_ids1,
                                                  scores)
            # overwrite matches0 and matching_scores0
            del grp['matches0'], grp['matching_scores0']
            grp.create_dataset('matches0', data=matches0)
            grp.create_dataset('matching_scores0', data=scores0)
@torch.no_grad()
def match_and_assign(conf: Dict,
                     pairs_path: Path,
                     image_dir: Path,
                     match_path: Path,  # out
                     feature_path_q: Path,  # out
                     feature_paths_refs: Optional[List[Path]] = [],
                     max_kps: Optional[int] = 8192,
                     overwrite: bool = False) -> Path:
    """Run dense matching on all new pairs, then quantize the results into
    keypoints (written to `feature_path_q`) and sparse matches (written to
    `match_path`).
    """
    # BUG FIX: copy before modifying — the original appended to the
    # caller's list and, worse, to the shared mutable default argument,
    # so repeated calls accumulated stale reference paths.
    feature_paths_refs = list(feature_paths_refs)
    for path in feature_paths_refs:
        if not path.exists():
            raise FileNotFoundError(f'Reference feature file {path}.')
    pairs = parse_retrieval(pairs_path)
    pairs = [(q, r) for q, rs in pairs.items() for r in rs]
    pairs = find_unique_new_pairs(pairs, None if overwrite else match_path)
    required_queries = set(sum(pairs, ()))

    name2ref = {n: i for i, p in enumerate(feature_paths_refs)
                for n in list_h5_names(p)}
    existing_refs = required_queries.intersection(set(name2ref.keys()))

    # images which require feature extraction
    required_queries = required_queries - existing_refs

    if feature_path_q.exists():
        existing_queries = set(list_h5_names(feature_path_q))
        feature_paths_refs.append(feature_path_q)
        existing_refs = set.union(existing_refs, existing_queries)
        if not overwrite:
            required_queries = required_queries - existing_queries

    if len(pairs) == 0 and len(required_queries) == 0:
        logger.info("All pairs exist. Skipping dense matching.")
        return

    # extract semi-dense matches
    match_dense(conf, pairs, image_dir, match_path,
                existing_refs=existing_refs)

    logger.info("Assigning matches...")

    # Pre-load existing keypoints
    cpdict, bindict = load_keypoints(
        conf, feature_paths_refs,
        quantize=required_queries)

    # Reassign matches by aggregation
    cpdict = aggregate_matches(
        conf, pairs, match_path, feature_path=feature_path_q,
        required_queries=required_queries, max_kps=max_kps, cpdict=cpdict,
        bindict=bindict)

    # Invalidate matches that are far from selected bin by reassignment
    if max_kps is not None:
        logger.info(f'Reassign matches with max_error={conf["max_error"]}.')
        assign_matches(pairs, match_path, cpdict,
                       max_error=conf['max_error'])
@torch.no_grad()
def main(conf: Dict,
         pairs: Path,
         image_dir: Path,
         export_dir: Optional[Path] = None,
         matches: Optional[Path] = None,  # out
         features: Optional[Path] = None,  # out
         features_ref: Optional[Path] = None,
         max_kps: Optional[int] = 8192,
         overwrite: bool = False) -> Path:
    """Semi-dense matching entry point.

    Resolves output paths (explicit Paths or names inside `export_dir`),
    normalizes `features_ref` to a list, and delegates to
    `match_and_assign`. Returns (features_q, matches).
    """
    logger.info('Extracting semi-dense features with configuration:'
                f'\n{pprint.pformat(conf)}')

    if features is None:
        features = 'feats_'

    if isinstance(features, Path):
        # Explicit feature path: the match path must be explicit too.
        features_q = features
        if matches is None:
            raise ValueError('Either provide both features and matches as Path'
                             ' or both as names.')
    else:
        # Name-based outputs: derive both paths inside export_dir.
        if export_dir is None:
            raise ValueError('Provide an export_dir if features and matches'
                             f' are not file paths: {features}, {matches}.')
        features_q = Path(export_dir,
                          f'{features}{conf["output"]}.h5')
        if matches is None:
            matches = Path(
                export_dir, f'{conf["output"]}_{pairs.stem}.h5')

    # Normalize features_ref to a list of paths.
    if features_ref is None:
        features_ref = []
    elif isinstance(features_ref, Path):
        features_ref = [features_ref]
    elif isinstance(features_ref, list):
        features_ref = list(features_ref)
    else:
        raise TypeError(str(features_ref))

    match_and_assign(conf, pairs, image_dir, matches,
                     features_q, features_ref,
                     max_kps, overwrite)

    return features_q, matches
if __name__ == '__main__':
    # CLI entry point; the dense matcher is selected by name from `confs`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pairs', type=Path, required=True)
    parser.add_argument('--image_dir', type=Path, required=True)
    parser.add_argument('--export_dir', type=Path, required=True)
    parser.add_argument('--matches', type=Path,
                        default=confs['loftr']['output'])
    parser.add_argument('--features', type=str,
                        default='feats_' + confs['loftr']['output'])
    parser.add_argument('--conf', type=str, default='loftr',
                        choices=list(confs.keys()))
    cli_args = parser.parse_args()
    main(confs[cli_args.conf], cli_args.pairs, cli_args.image_dir,
         cli_args.export_dir, cli_args.matches, cli_args.features)
| 22,408 | 37.371575 | 81 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/r2d2.py | import sys
from pathlib import Path
import torchvision.transforms as tvf
from ..utils.base_model import BaseModel
r2d2_path = Path(__file__).parent / "../../third_party/r2d2"
sys.path.append(str(r2d2_path))
from extract import load_network, NonMaxSuppression, extract_multiscale
class R2D2(BaseModel):
    """R2D2 local feature extractor (wraps the third-party r2d2 code)."""
    default_conf = {
        'model_name': 'r2d2_WASF_N16.pt',
        'max_keypoints': 5000,
        'scale_factor': 2**0.25,
        'min_size': 256,
        'max_size': 1024,
        'min_scale': 0,
        'max_scale': 1,
        'reliability_threshold': 0.7,
        'repetability_threshold': 0.7,
    }
    required_inputs = ['image']

    def _init(self, conf):
        # Weights ship with the third-party r2d2 checkout.
        model_fn = r2d2_path / "models" / conf['model_name']
        # ImageNet normalization statistics.
        self.norm_rgb = tvf.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
        self.net = load_network(model_fn)
        self.detector = NonMaxSuppression(
            rel_thr=conf['reliability_threshold'],
            rep_thr=conf['repetability_threshold']
        )

    def _forward(self, data):
        img = data['image']
        img = self.norm_rgb(img)
        # Multi-scale extraction: returns (x, y, scale) rows, descriptors,
        # and detection scores.
        xys, desc, scores = extract_multiscale(
            self.net, img, self.detector,
            scale_f=self.conf['scale_factor'],
            min_size=self.conf['min_size'],
            max_size=self.conf['max_size'],
            min_scale=self.conf['min_scale'],
            max_scale=self.conf['max_scale'],
        )
        # Keep the top-k keypoints by score. Precedence makes this
        # `(-max_keypoints) or None`, so a max_keypoints of 0 yields a
        # [None:] slice, i.e. keep everything.
        idxs = scores.argsort()[-self.conf['max_keypoints'] or None:]
        # Drop the per-keypoint scale column, keep (x, y) only.
        xy = xys[idxs, :2]
        desc = desc[idxs].t()
        scores = scores[idxs]
        pred = {'keypoints': xy[None],
                'descriptors': desc[None],
                'scores': scores[None]}
        return pred
| 1,784 | 30.315789 | 71 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/cosplace.py | '''
Code for loading models trained with CosPlace as a global features extractor
for geolocalization through image retrieval.
Multiple models are available with different backbones. Below is a summary of
models available (backbone : list of available output descriptors
dimensionality). For example you can use a model based on a ResNet50 with
descriptors dimensionality 1024.
ResNet18: [32, 64, 128, 256, 512]
ResNet50: [32, 64, 128, 256, 512, 1024, 2048]
ResNet101: [32, 64, 128, 256, 512, 1024, 2048]
ResNet152: [32, 64, 128, 256, 512, 1024, 2048]
VGG16: [ 64, 128, 256, 512]
CosPlace paper: https://arxiv.org/abs/2204.02287
'''
import torch
import torchvision.transforms as tvf
from ..utils.base_model import BaseModel
class CosPlace(BaseModel):
    """Global image descriptor from CosPlace, loaded through torch.hub.

    See the module docstring for the available backbones and descriptor
    dimensionalities.
    """
    default_conf = {
        'backbone': 'ResNet50',
        'fc_output_dim': 2048
    }
    required_inputs = ['image']

    def _init(self, conf):
        self.net = torch.hub.load(
            'gmberton/CosPlace',
            'get_trained_model',
            backbone=conf['backbone'],
            fc_output_dim=conf['fc_output_dim']
        ).eval()
        # ImageNet normalization statistics.
        self.norm_rgb = tvf.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

    def _forward(self, data):
        descriptor = self.net(self.norm_rgb(data['image']))
        return {
            'global_descriptor': descriptor,
        }
| 1,451 | 29.893617 | 77 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/dir.py | import sys
from pathlib import Path
import torch
from zipfile import ZipFile
import os
import sklearn
import gdown
from ..utils.base_model import BaseModel
sys.path.append(str(
Path(__file__).parent / '../../third_party/deep-image-retrieval'))
os.environ['DB_ROOT'] = '' # required by dirtorch
from dirtorch.utils import common # noqa: E402
from dirtorch.extract_features import load_model # noqa: E402
# The DIR model checkpoints (pickle files) include sklearn.decomposition.pca,
# which has been deprecated in sklearn v0.24
# and must be explicitly imported with `from sklearn.decomposition import PCA`.
# This is a hacky workaround to maintain forward compatibility.
sys.modules['sklearn.decomposition.pca'] = sklearn.decomposition._pca
class DIR(BaseModel):
    """Global descriptor from Deep Image Retrieval (AP-GeM), with an optional
    learned PCA whitening, for image retrieval / place recognition."""
    default_conf = {
        'model_name': 'Resnet-101-AP-GeM',
        'whiten_name': 'Landmarks_clean',
        'whiten_params': {
            'whitenp': 0.25,
            'whitenv': None,
            'whitenm': 1.0,
        },
        'pooling': 'gem',
        'gemp': 3,
    }
    required_inputs = ['image']
    # Google-Drive links for the released checkpoints, downloaded via gdown.
    dir_models = {
        'Resnet-101-AP-GeM': 'https://docs.google.com/uc?export=download&id=1UWJGDuHtzaQdFhSMojoYVQjmCXhIwVvy',
    }
    def _init(self, conf):
        # Download and unpack the checkpoint on first use; it is cached in
        # the torch hub directory afterwards.
        checkpoint = Path(
            torch.hub.get_dir(), 'dirtorch', conf['model_name'] + '.pt')
        if not checkpoint.exists():
            checkpoint.parent.mkdir(exist_ok=True, parents=True)
            link = self.dir_models[conf['model_name']]
            gdown.download(str(link), str(checkpoint)+'.zip', quiet=False)
            zf = ZipFile(str(checkpoint)+'.zip', 'r')
            zf.extractall(checkpoint.parent)
            zf.close()
            os.remove(str(checkpoint)+'.zip')
        self.net = load_model(checkpoint, False) # first load on CPU
        if conf['whiten_name']:
            # The checkpoint must ship a PCA for the requested whitening set.
            assert conf['whiten_name'] in self.net.pca
    def _forward(self, data):
        image = data['image']
        assert image.shape[1] == 3
        # Normalize with the statistics stored inside the checkpoint.
        mean = self.net.preprocess['mean']
        std = self.net.preprocess['std']
        image = image - image.new_tensor(mean)[:, None, None]
        image = image / image.new_tensor(std)[:, None, None]
        desc = self.net(image)
        desc = desc.unsqueeze(0)  # batch dimension
        if self.conf['whiten_name']:
            # Whitening runs in NumPy on CPU; convert back to torch after.
            pca = self.net.pca[self.conf['whiten_name']]
            desc = common.whiten_features(
                desc.cpu().numpy(), pca, **self.conf['whiten_params'])
            desc = torch.from_numpy(desc)
        return {
            'global_descriptor': desc,
        }
| 2,619 | 32.589744 | 111 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/dog.py | import kornia
from kornia.feature.laf import (
laf_from_center_scale_ori, extract_patches_from_pyramid)
import numpy as np
import torch
import pycolmap
from ..utils.base_model import BaseModel
EPS = 1e-6
def sift_to_rootsift(x):
    """Convert SIFT descriptors to RootSIFT: L1-normalize, take the
    element-wise square root, then L2-normalize (last axis)."""
    l1 = np.linalg.norm(x, ord=1, axis=-1, keepdims=True) + EPS
    rooted = np.sqrt((x / l1).clip(min=EPS))
    return rooted / (np.linalg.norm(rooted, axis=-1, keepdims=True) + EPS)
class DoG(BaseModel):
    """Difference-of-Gaussians (SIFT) keypoints via pycolmap, with a choice
    of descriptor: SIFT, RootSIFT, or learned patch descriptors
    (SOSNet / HardNet through kornia).
    """
    default_conf = {
        'options': {
            'first_octave': 0,
            'peak_threshold': 0.01,
        },
        'descriptor': 'rootsift',
        'max_keypoints': -1,
        'patch_size': 32,
        'mr_size': 12,
    }
    required_inputs = ['image']
    detection_noise = 1.0
    max_batch_size = 1024

    def _init(self, conf):
        if conf['descriptor'] == 'sosnet':
            self.describe = kornia.feature.SOSNet(pretrained=True)
        elif conf['descriptor'] == 'hardnet':
            self.describe = kornia.feature.HardNet(pretrained=True)
        elif conf['descriptor'] not in ['sift', 'rootsift']:
            raise ValueError(f'Unknown descriptor: {conf["descriptor"]}')
        self.sift = None  # lazily instantiated on the first image
        # Dummy parameter used only to track which device the module is on.
        self.dummy_param = torch.nn.Parameter(torch.empty(0))

    def _forward(self, data):
        image = data['image']
        image_np = image.cpu().numpy()[0, 0]
        assert image.shape[1] == 1
        assert image_np.min() >= -EPS and image_np.max() <= 1 + EPS

        if self.sift is None:
            # Instantiate the extractor lazily so the device is known.
            device = self.dummy_param.device
            use_gpu = pycolmap.has_cuda and device.type == 'cuda'
            options = {**self.conf['options']}
            if self.conf['descriptor'] == 'rootsift':
                options['normalization'] = pycolmap.Normalization.L1_ROOT
            else:
                options['normalization'] = pycolmap.Normalization.L2
            self.sift = pycolmap.Sift(
                options=pycolmap.SiftExtractionOptions(options),
                device=getattr(pycolmap.Device, 'cuda' if use_gpu else 'cpu'))

        keypoints, scores, descriptors = self.sift.extract(image_np)
        scales = keypoints[:, 2]
        oris = np.rad2deg(keypoints[:, 3])

        if self.conf['descriptor'] in ['sift', 'rootsift']:
            # We still renormalize because COLMAP does not normalize well,
            # maybe due to numerical errors
            if self.conf['descriptor'] == 'rootsift':
                descriptors = sift_to_rootsift(descriptors)
            descriptors = torch.from_numpy(descriptors)
        elif self.conf['descriptor'] in ('sosnet', 'hardnet'):
            # Extract patches around the keypoints and describe them with
            # the learned descriptor network.
            center = keypoints[:, :2] + 0.5
            laf_scale = scales * self.conf['mr_size'] / 2
            laf_ori = -oris
            lafs = laf_from_center_scale_ori(
                torch.from_numpy(center)[None],
                torch.from_numpy(laf_scale)[None, :, None, None],
                torch.from_numpy(laf_ori)[None, :, None]).to(image.device)
            patches = extract_patches_from_pyramid(
                image, lafs, PS=self.conf['patch_size'])[0]
            descriptors = patches.new_zeros((len(patches), 128))
            if len(patches) > 0:
                # Process patches in chunks to bound peak memory usage.
                for start_idx in range(0, len(patches), self.max_batch_size):
                    end_idx = min(len(patches), start_idx+self.max_batch_size)
                    descriptors[start_idx:end_idx] = self.describe(
                        patches[start_idx:end_idx])
        else:
            raise ValueError(f'Unknown descriptor: {self.conf["descriptor"]}')

        keypoints = torch.from_numpy(keypoints[:, :2])  # keep only x, y
        scales = torch.from_numpy(scales)
        oris = torch.from_numpy(oris)
        scores = torch.from_numpy(scores)

        if self.conf['max_keypoints'] != -1:
            # TODO: check that the scores from PyCOLMAP are 100% correct,
            # follow https://github.com/mihaidusmanu/pycolmap/issues/8
            # BUG FIX: torch.topk returns a (values, indices) named tuple;
            # the previous code indexed the tensors with the whole tuple.
            # Use `.indices`, and clamp k so that images with fewer
            # detections than `max_keypoints` do not raise.
            num_kept = min(self.conf['max_keypoints'], len(scores))
            indices = torch.topk(scores, num_kept).indices
            keypoints = keypoints[indices]
            scales = scales[indices]
            oris = oris[indices]
            scores = scores[indices]
            descriptors = descriptors[indices]

        return {
            'keypoints': keypoints[None],
            'scales': scales[None],
            'oris': oris[None],
            'scores': scores[None],
            'descriptors': descriptors.T[None],
        }
| 4,457 | 37.431034 | 78 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/superpoint.py | import sys
from pathlib import Path
import torch
from ..utils.base_model import BaseModel
sys.path.append(str(Path(__file__).parent / '../../third_party'))
from SuperGluePretrainedNetwork.models import superpoint # noqa E402
# The original keypoint sampling is incorrect. We patch it here but
# we don't fix it upstream to not impact existing evaluations.
def sample_descriptors_fix_sampling(keypoints, descriptors, s: int = 8):
    """Bilinearly sample per-keypoint descriptors from a dense map.

    Keypoints are given in image pixels; `s` is the stride between the image
    and the descriptor map. Returns L2-normalized descriptors of shape
    (b, c, n_keypoints).
    """
    b, c, h, w = descriptors.shape
    # Map pixel coordinates to the (-1, 1) range expected by grid_sample,
    # accounting for the half-pixel offset of cell centers.
    grid = (keypoints + 0.5) / (keypoints.new_tensor([w, h]) * s)
    grid = grid * 2 - 1
    sampled = torch.nn.functional.grid_sample(
        descriptors, grid.view(b, 1, -1, 2),
        mode='bilinear', align_corners=False)
    return torch.nn.functional.normalize(
        sampled.reshape(b, c, -1), p=2, dim=1)
class SuperPoint(BaseModel):
    """SuperPoint detector/descriptor, wrapping the vendored
    SuperGluePretrainedNetwork implementation.

    With `fix_sampling=True`, the vendored module's descriptor sampling is
    monkey-patched with the corrected half-pixel version defined above.
    """
    default_conf = {
        'nms_radius': 4,
        'keypoint_threshold': 0.005,
        'max_keypoints': -1,
        'remove_borders': 4,
        'fix_sampling': False,
    }
    required_inputs = ['image']
    detection_noise = 2.0  # NOTE(review): presumably keypoint noise in pixels used downstream — confirm
    def _init(self, conf):
        if conf['fix_sampling']:
            # Patch the module-level function before building the network so
            # the network picks up the corrected sampling.
            superpoint.sample_descriptors = sample_descriptors_fix_sampling
        self.net = superpoint.SuperPoint(conf)
    def _forward(self, data):
        # The vendored network consumes/produces the hloc dictionaries as-is.
        return self.net(data)
| 1,439 | 31.727273 | 75 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/netvlad.py | from pathlib import Path
import subprocess
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from scipy.io import loadmat
from ..utils.base_model import BaseModel
logger = logging.getLogger(__name__)
EPS = 1e-6
class NetVLADLayer(nn.Module):
    """NetVLAD aggregation: soft-assigns local descriptors to K cluster
    centers and accumulates the residuals into a (input_dim * K) vector."""

    def __init__(self, input_dim=512, K=64, score_bias=False, intranorm=True):
        super().__init__()
        # 1x1 convolution producing per-cluster soft-assignment scores.
        self.score_proj = nn.Conv1d(
            input_dim, K, kernel_size=1, bias=score_bias)
        centers = nn.parameter.Parameter(torch.empty([input_dim, K]))
        nn.init.xavier_uniform_(centers)
        self.register_parameter('centers', centers)
        self.intranorm = intranorm
        self.output_dim = input_dim * K

    def forward(self, x):
        """Aggregate (b, input_dim, n) local descriptors to (b, output_dim)."""
        batch = x.size(0)
        assignment = F.softmax(self.score_proj(x), dim=1)
        # Residual of every descriptor w.r.t. every center: (b, d, K, n).
        residuals = x.unsqueeze(2) - self.centers.unsqueeze(0).unsqueeze(-1)
        vlad = (assignment.unsqueeze(1) * residuals).sum(dim=-1)
        if self.intranorm:
            # From the official MATLAB implementation.
            vlad = F.normalize(vlad, dim=1)
        vlad = vlad.view(batch, -1)
        return F.normalize(vlad, dim=1)
class NetVLAD(BaseModel):
    """VGG16 + NetVLAD global descriptor, loading weights converted from the
    official MATLAB release (optionally with a learned whitening layer)."""
    default_conf = {
        'model_name': 'VGG16-NetVLAD-Pitts30K',
        'whiten': True
    }
    required_inputs = ['image']
    # Models exported using
    # https://github.com/uzh-rpg/netvlad_tf_open/blob/master/matlab/net_class2struct.m.
    dir_models = {
        'VGG16-NetVLAD-Pitts30K': 'https://cvg-data.inf.ethz.ch/hloc/netvlad/Pitts30K_struct.mat',
        'VGG16-NetVLAD-TokyoTM': 'https://cvg-data.inf.ethz.ch/hloc/netvlad/TokyoTM_struct.mat'
    }
    def _init(self, conf):
        assert conf['model_name'] in self.dir_models.keys()
        # Download the checkpoint.
        checkpoint = Path(
            torch.hub.get_dir(), 'netvlad', conf['model_name'] + '.mat')
        if not checkpoint.exists():
            checkpoint.parent.mkdir(exist_ok=True, parents=True)
            link = self.dir_models[conf['model_name']]
            cmd = ['wget', link, '-O', str(checkpoint)]
            logger.info(f'Downloading the NetVLAD model with `{cmd}`.')
            subprocess.run(cmd, check=True)
        # Create the network.
        # Remove classification head.
        backbone = list(models.vgg16().children())[0]
        # Remove last ReLU + MaxPool2d.
        self.backbone = nn.Sequential(*list(backbone.children())[: -2])
        self.netvlad = NetVLADLayer()
        if conf['whiten']:
            self.whiten = nn.Linear(self.netvlad.output_dim, 4096)
        # Parse MATLAB weights using https://github.com/uzh-rpg/netvlad_tf_open
        mat = loadmat(checkpoint, struct_as_record=False, squeeze_me=True)
        # CNN weights: copy each MATLAB conv layer into the matching
        # torchvision layer (MATLAB stores filters as S x S x IN x OUT).
        for layer, mat_layer in zip(self.backbone.children(),
                                    mat['net'].layers):
            if isinstance(layer, nn.Conv2d):
                w = mat_layer.weights[0] # Shape: S x S x IN x OUT
                b = mat_layer.weights[1] # Shape: OUT
                # Prepare for PyTorch - enforce float32 and right shape.
                # w should have shape: OUT x IN x S x S
                # b should have shape: OUT
                w = torch.tensor(w).float().permute([3, 2, 0, 1])
                b = torch.tensor(b).float()
                # Update layer weights.
                layer.weight = nn.Parameter(w)
                layer.bias = nn.Parameter(b)
        # NetVLAD weights: layer index 30 in the exported MATLAB struct.
        score_w = mat['net'].layers[30].weights[0] # D x K
        # centers are stored as opposite in official MATLAB code
        center_w = -mat['net'].layers[30].weights[1] # D x K
        # Prepare for PyTorch - make sure it is float32 and has right shape.
        # score_w should have shape K x D x 1
        # center_w should have shape D x K
        score_w = torch.tensor(score_w).float().permute([1, 0]).unsqueeze(-1)
        center_w = torch.tensor(center_w).float()
        # Update layer weights.
        self.netvlad.score_proj.weight = nn.Parameter(score_w)
        self.netvlad.centers = nn.Parameter(center_w)
        # Whitening weights: layer index 33 in the exported MATLAB struct.
        if conf['whiten']:
            w = mat['net'].layers[33].weights[0] # Shape: 1 x 1 x IN x OUT
            b = mat['net'].layers[33].weights[1] # Shape: OUT
            # Prepare for PyTorch - make sure it is float32 and has right shape
            w = torch.tensor(w).float().squeeze().permute([1, 0]) # OUT x IN
            b = torch.tensor(b.squeeze()).float() # Shape: OUT
            # Update layer weights.
            self.whiten.weight = nn.Parameter(w)
            self.whiten.bias = nn.Parameter(b)
        # Preprocessing parameters.
        self.preprocess = {
            'mean': mat['net'].meta.normalization.averageImage[0, 0],
            'std': np.array([1, 1, 1], dtype=np.float32)
        }
    def _forward(self, data):
        image = data['image']
        assert image.shape[1] == 3
        assert image.min() >= -EPS and image.max() <= 1 + EPS
        image = torch.clamp(image * 255, 0.0, 255.0) # Input should be 0-255.
        mean = self.preprocess['mean']
        std = self.preprocess['std']
        image = image - image.new_tensor(mean).view(1, -1, 1, 1)
        image = image / image.new_tensor(std).view(1, -1, 1, 1)
        # Feature extraction.
        descriptors = self.backbone(image)
        b, c, _, _ = descriptors.size()
        descriptors = descriptors.view(b, c, -1)
        # NetVLAD layer.
        descriptors = F.normalize(descriptors, dim=1) # Pre-normalization.
        desc = self.netvlad(descriptors)
        # Whiten if needed.
        if hasattr(self, 'whiten'):
            desc = self.whiten(desc)
            desc = F.normalize(desc, dim=1) # Final L2 normalization.
        return {
            'global_descriptor': desc
        }
| 5,941 | 37.089744 | 98 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/openibl.py | import torch
import torchvision.transforms as tvf
from ..utils.base_model import BaseModel
class OpenIBL(BaseModel):
    """Global image descriptor from an OpenIBL model loaded via torch.hub."""
    default_conf = {
        'model_name': 'vgg16_netvlad',
    }
    required_inputs = ['image']

    def _init(self, conf):
        self.net = torch.hub.load(
            'yxgeee/OpenIBL', conf['model_name'], pretrained=True).eval()
        # Normalization constants from OpenIBL; the std values equal 1/255.
        mean = [0.48501960784313836, 0.4579568627450961, 0.4076039215686255]
        std = [0.00392156862745098, 0.00392156862745098, 0.00392156862745098]
        self.norm_rgb = tvf.Normalize(mean=mean, std=std)

    def _forward(self, data):
        descriptor = self.net(self.norm_rgb(data['image']))
        return {
            'global_descriptor': descriptor,
        }
| 741 | 27.538462 | 77 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extractors/d2net.py | import sys
from pathlib import Path
import subprocess
import torch
from ..utils.base_model import BaseModel
d2net_path = Path(__file__).parent / '../../third_party/d2net'
sys.path.append(str(d2net_path))
from lib.model_test import D2Net as _D2Net
from lib.pyramid import process_multiscale
class D2Net(BaseModel):
    """D2-Net joint detector/descriptor (vendored third-party code).

    The checkpoint is downloaded on first use into the vendored models
    directory.
    """
    default_conf = {
        'model_name': 'd2_tf.pth',
        'checkpoint_dir': d2net_path / 'models',
        'use_relu': True,
        'multiscale': False,
    }
    required_inputs = ['image']

    def _init(self, conf):
        checkpoint = conf['checkpoint_dir'] / conf['model_name']
        if not checkpoint.exists():
            checkpoint.parent.mkdir(exist_ok=True)
            cmd = ['wget', 'https://dsmn.ml/files/d2-net/'+conf['model_name'],
                   '-O', str(checkpoint)]
            subprocess.run(cmd, check=True)
        self.net = _D2Net(
            model_file=checkpoint,
            use_relu=conf['use_relu'],
            use_cuda=False)

    def _forward(self, data):
        # D2-Net expects BGR input with Caffe-style mean subtraction.
        image = data['image'].flip(1)  # RGB -> BGR
        mean = image.new_tensor([103.939, 116.779, 123.68])
        image = image * 255 - mean.view(1, 3, 1, 1)
        if self.conf['multiscale']:
            keypoints, scores, descriptors = process_multiscale(
                image, self.net)
        else:
            keypoints, scores, descriptors = process_multiscale(
                image, self.net, scales=[1])
        # Returned keypoints carry (y, x, scale); keep only (x, y).
        keypoints = keypoints[:, [1, 0]]
        return {
            'keypoints': torch.from_numpy(keypoints)[None],
            'scores': torch.from_numpy(scores)[None],
            'descriptors': torch.from_numpy(descriptors.T)[None],
        }
| 1,772 | 31.236364 | 78 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/matchers/nearest_neighbor.py | import torch
from ..utils.base_model import BaseModel
def find_nn(sim, ratio_thresh, distance_thresh):
    """One-directional nearest-neighbor matching from a similarity matrix.

    `sim` has shape (..., n0, n1). Returns per-row matches (index into the
    second set, or -1 when filtered out) and scores in [0, 1].
    """
    k = 2 if ratio_thresh else 1
    sim_nn, ind_nn = sim.topk(k, dim=-1, largest=True)
    # Cosine similarity -> squared Euclidean distance of unit vectors.
    dist_nn = 2 * (1 - sim_nn)
    mask = torch.ones(ind_nn.shape[:-1], dtype=torch.bool, device=sim.device)
    if ratio_thresh:
        # Lowe's ratio test, expressed on squared distances.
        mask = mask & (dist_nn[..., 0] <= (ratio_thresh**2)*dist_nn[..., 1])
    if distance_thresh:
        mask = mask & (dist_nn[..., 0] <= distance_thresh**2)
    matches = torch.where(mask, ind_nn[..., 0], ind_nn.new_tensor(-1))
    scores = torch.where(mask, (sim_nn[..., 0]+1)/2, sim_nn.new_tensor(0))
    return matches, scores
def mutual_check(m0, m1):
    """Keep only matches confirmed in both directions (cross-check)."""
    inds0 = torch.arange(m0.shape[-1], device=m0.device)
    # Follow each forward match back through m1; unmatched entries (-1) are
    # temporarily redirected to index 0 and rejected by the mask below.
    back = torch.gather(m1, -1, torch.where(m0 > -1, m0, m0.new_tensor(0)))
    mutual = (m0 > -1) & (inds0 == back)
    return torch.where(mutual, m0, m0.new_tensor(-1))
class NearestNeighbor(BaseModel):
    """Exhaustive nearest-neighbor matcher with optional ratio and distance
    tests and mutual cross-checking."""
    default_conf = {
        'ratio_threshold': None,
        'distance_threshold': None,
        'do_mutual_check': True,
    }
    required_inputs = ['descriptors0', 'descriptors1']

    def _init(self, conf):
        pass

    def _forward(self, data):
        desc0, desc1 = data['descriptors0'], data['descriptors1']
        # Degenerate case: one image has no keypoints at all.
        if desc0.size(-1) == 0 or desc1.size(-1) == 0:
            matches0 = torch.full(
                desc0.shape[:2], -1, device=desc0.device)
            return {
                'matches0': matches0,
                'matching_scores0': torch.zeros_like(matches0)
            }
        # The ratio test needs at least two candidates on each side.
        ratio_threshold = self.conf['ratio_threshold']
        if desc0.size(-1) == 1 or desc1.size(-1) == 1:
            ratio_threshold = None
        sim = torch.einsum('bdn,bdm->bnm', desc0, desc1)
        matches0, scores0 = find_nn(
            sim, ratio_threshold, self.conf['distance_threshold'])
        if self.conf['do_mutual_check']:
            matches1, _ = find_nn(
                sim.transpose(1, 2), ratio_threshold,
                self.conf['distance_threshold'])
            matches0 = mutual_check(matches0, matches1)
        return {
            'matches0': matches0,
            'matching_scores0': scores0,
        }
| 2,292 | 35.396825 | 84 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/matchers/loftr.py | import torch
import warnings
from kornia.feature.loftr.loftr import default_cfg
from kornia.feature import LoFTR as LoFTR_
from ..utils.base_model import BaseModel
class LoFTR(BaseModel):
    """Detector-free matching with LoFTR (kornia), returning keypoint pairs
    and per-match confidence scores."""
    default_conf = {
        'weights': 'outdoor',
        'match_threshold': 0.2,
        'max_num_matches': None,
    }
    required_inputs = [
        'image0',
        'image1'
    ]

    def _init(self, conf):
        cfg = default_cfg
        cfg['match_coarse']['thr'] = conf['match_threshold']
        self.net = LoFTR_(pretrained=conf['weights'], config=cfg)

    def _forward(self, data):
        # For consistency with hloc pairs the keypoints are refined in
        # image0, so swap the pair before matching and swap the predictions
        # back afterwards.
        swap = {
            'keypoints0': 'keypoints1',
            'keypoints1': 'keypoints0',
            'image0': 'image1',
            'image1': 'image0',
            'mask0': 'mask1',
            'mask1': 'mask0',
        }
        swapped = {swap[key]: value for key, value in data.items()}
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            pred = self.net(swapped)
        scores = pred['confidence']
        # Optionally keep only the most confident matches.
        top_k = self.conf['max_num_matches']
        if top_k is not None and len(scores) > top_k:
            keep = torch.argsort(scores, descending=True)[:top_k]
            pred['keypoints0'] = pred['keypoints0'][keep]
            pred['keypoints1'] = pred['keypoints1'][keep]
            scores = scores[keep]
        # Swap the prediction keys back to the original orientation.
        pred = {swap.get(key, key): value for key, value in pred.items()}
        pred['scores'] = scores
        del pred['confidence']
        return pred
| 1,618 | 28.981481 | 78 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/matchers/adalam.py | import torch
from ..utils.base_model import BaseModel
from kornia.feature.adalam import AdalamFilter
from kornia.utils.helpers import get_cuda_device_if_available
class AdaLAM(BaseModel):
    """Match local features with AdaLAM spatial filtering (kornia)."""
    # See https://kornia.readthedocs.io/en/latest/_modules/kornia/feature/adalam/adalam.html.
    default_conf = {
        'area_ratio': 100,
        'search_expansion': 4,
        'ransac_iters': 128,
        'min_inliers': 6,
        'min_confidence': 200,
        'orientation_difference_threshold': 30,
        'scale_rate_threshold': 1.5,
        'detected_scale_rate_threshold': 5,
        'refit': True,
        'force_seed_mnn': True,
        'device': get_cuda_device_if_available()
    }
    required_inputs = [
        'image0', 'image1',
        'descriptors0', 'descriptors1',
        'keypoints0', 'keypoints1',
        'scales0', 'scales1',
        'oris0', 'oris1']

    def _init(self, conf):
        self.adalam = AdalamFilter(conf)

    def _forward(self, data):
        kpts0, kpts1 = data['keypoints0'], data['keypoints1']
        assert kpts0.size(0) == 1
        if kpts0.size(1) < 2 or kpts1.size(1) < 2:
            # AdaLAM needs at least two keypoints in each image.
            pairs = torch.zeros(
                (0, 2), dtype=torch.int64, device=kpts0.device)
        else:
            pairs = self.adalam.match_and_filter(
                kpts0[0], kpts1[0],
                data['descriptors0'][0].T, data['descriptors1'][0].T,
                data['image0'].shape[2:], data['image1'].shape[2:],
                data['oris0'][0], data['oris1'][0],
                data['scales0'][0], data['scales1'][0]
            )
        # Scatter the (i, j) pair list into a dense matches0 vector.
        matches0 = torch.full(
            (kpts0.size(1),), -1,
            dtype=torch.int64, device=kpts0.device)
        matches0[pairs[:, 0]] = pairs[:, 1]
        return {
            'matches0': matches0.unsqueeze(0),
            'matching_scores0': torch.zeros(matches0.size(0)).unsqueeze(0)
        }
| 1,965 | 34.107143 | 93 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/utils/base_model.py | import sys
from abc import ABCMeta, abstractmethod
from torch import nn
from copy import copy
import inspect
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Common interface for extractors and matchers.

    Subclasses implement `_init` and `_forward`; this base merges the user
    configuration into `default_conf` and validates `required_inputs`.
    """
    default_conf = {}
    required_inputs = []

    def __init__(self, conf):
        """Merge the configuration and call the child's `_init`."""
        super().__init__()
        merged = {**self.default_conf, **conf}
        self.conf = merged
        # Shallow-copy so per-instance mutation does not leak to the class.
        self.required_inputs = copy(self.required_inputs)
        self._init(merged)
        sys.stdout.flush()

    def forward(self, data):
        """Validate the required inputs and call the child's `_forward`."""
        for key in self.required_inputs:
            assert key in data, 'Missing key {} in data'.format(key)
        return self._forward(data)

    @abstractmethod
    def _init(self, conf):
        """To be implemented by the child class."""
        raise NotImplementedError

    @abstractmethod
    def _forward(self, data):
        """To be implemented by the child class."""
        raise NotImplementedError
def dynamic_load(root, model):
    """Import the module `root.model` and return its unique BaseModel subclass."""
    module_path = f'{root.__name__}.{model}'
    module = __import__(module_path, fromlist=[''])
    members = inspect.getmembers(module, inspect.isclass)
    # Keep only classes defined in that module (not re-exported imports)
    # that derive from BaseModel; exactly one such class must exist.
    candidates = [
        (name, cls) for name, cls in members
        if cls.__module__ == module_path and issubclass(cls, BaseModel)]
    assert len(candidates) == 1, candidates
    return candidates[0][1]
| 1,546 | 31.229167 | 78 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/pipelines/7Scenes/create_gt_sfm.py | from pathlib import Path
import numpy as np
import torch
import PIL.Image
from tqdm import tqdm
import pycolmap
from ...utils.read_write_model import write_model, read_model
def scene_coordinates(p2D, R_w2c, t_w2c, depth, camera):
    """Back-project 2D keypoints with known depth into world coordinates."""
    assert len(depth) == len(p2D)
    # Undistort and normalize the pixel coordinates with the camera model.
    ret = pycolmap.image_to_world(p2D, camera._asdict())
    p2D_norm = np.asarray(ret['world_points'])
    # Lift to homogeneous coordinates, scale by depth to get camera-frame
    # points, then apply the inverse rigid transform: R^T (p - t).
    p2D_h = np.concatenate([p2D_norm, np.ones_like(p2D_norm[:, :1])], 1)
    p3D_c = p2D_h * depth[:, None]
    return (p3D_c - t_w2c) @ R_w2c
def interpolate_depth(depth, kp):
    """Sample a depth map at keypoint locations.

    Bilinear interpolation is tried first; keypoints whose bilinear value is
    NaN fall back to nearest-neighbor sampling. Returns the sampled depths
    and a boolean validity mask.
    """
    h, w = depth.shape
    # Normalize pixel coordinates to the (-1, 1) range of grid_sample.
    kp = kp / np.array([[w-1, h-1]]) * 2 - 1
    assert np.all(kp > -1) and np.all(kp < 1)
    depth_t = torch.from_numpy(depth)[None, None]
    kp_t = torch.from_numpy(kp)[None, None]
    sample = torch.nn.functional.grid_sample
    interp_lin = sample(depth_t, kp_t, align_corners=True,
                        mode='bilinear')[0, :, 0]
    interp_nn = sample(depth_t, kp_t, align_corners=True,
                       mode='nearest')[0, :, 0]
    # Prefer the bilinear value; fall back to nearest where it is NaN.
    interp = torch.where(torch.isnan(interp_lin), interp_nn, interp_lin)
    valid = ~torch.any(torch.isnan(interp), 0)
    return interp.T.numpy().flatten(), valid.numpy()
def image_path_to_rendered_depth_path(image_name):
    """Map a 7Scenes color-image path to its rendered-depth file name.

    e.g. 'seq-01/frame-000000.color.png' ->
         'seq01_frame-000000.pose.depth.tiff'
    """
    parts = image_name.split('/')
    # Drop the dashes of the sequence directory and join with the frame name.
    stem = '_'.join([parts[0].replace('-', ''), parts[1]])
    stem = stem.replace('color', 'pose')
    return stem.replace('png', 'depth.tiff')
def project_to_image(p3D, R, t, camera, eps: float = 1e-4, pad: int = 1):
    """Project world points into the image; return kept 2D points and mask.

    Points behind the camera (z < eps) or landing within `pad` pixels of the
    image border are rejected.
    """
    p3D_c = (p3D @ R.T) + t
    in_front = p3D_c[:, -1] >= eps  # keep points in front of the camera
    p2D_norm = p3D_c[:, :-1] / p3D_c[:, -1:].clip(min=eps)
    ret = pycolmap.world_to_image(p2D_norm, camera._asdict())
    p2D = np.asarray(ret['image_points'])
    upper = np.array([camera.width - pad - 1, camera.height - pad - 1])
    valid = np.all((p2D >= pad) & (p2D <= upper), -1) & in_front
    return p2D[valid], valid
def correct_sfm_with_gt_depth(sfm_path, depth_folder_path, output_path):
    """Replace the triangulated 3D points of an SfM model with points
    back-projected from rendered ground-truth depth maps.

    For every image, each observed 3D point is re-projected into the image;
    observations that fall outside the view or onto invalid depth are
    removed from their tracks, and the surviving points are moved to the
    depth-derived scene coordinates. The corrected model is written to
    `output_path`.
    """
    cameras, images, points3D = read_model(sfm_path)
    for imgid, img in tqdm(images.items()):
        image_name = img.name
        depth_name = image_path_to_rendered_depth_path(image_name)
        depth = PIL.Image.open(Path(depth_folder_path) / depth_name)
        depth = np.array(depth).astype('float64')
        depth = depth/1000. # mm to meter
        # Invalid depth readings (zero or implausibly far) become NaN so the
        # interpolation step can reject them.
        depth[(depth == 0.0) | (depth > 1000.0)] = np.nan
        R_w2c, t_w2c = img.qvec2rotmat(), img.tvec
        camera = cameras[img.camera_id]
        p3D_ids = img.point3D_ids
        p3Ds = np.stack([points3D[i].xyz for i in p3D_ids[p3D_ids != -1]], 0)
        # Project the image's tracked 3D points; points outside the view
        # frustum become invalid observations.
        p2Ds, valids_projected = project_to_image(p3Ds, R_w2c, t_w2c, camera)
        invalid_p3D_ids = p3D_ids[p3D_ids != -1][~valids_projected]
        interp_depth, valids_backprojected = interpolate_depth(depth, p2Ds)
        # Ground-truth scene coordinates for the surviving observations.
        scs = scene_coordinates(p2Ds[valids_backprojected], R_w2c, t_w2c,
                                interp_depth[valids_backprojected],
                                camera)
        invalid_p3D_ids = np.append(
            invalid_p3D_ids,
            p3D_ids[p3D_ids != -1][valids_projected][~valids_backprojected])
        # Remove this image's observation from every invalidated track.
        for p3did in invalid_p3D_ids:
            if p3did == -1:
                continue
            else:
                obs_imgids = points3D[p3did].image_ids
                invalid_imgids = list(np.where(obs_imgids == img.id)[0])
                points3D[p3did] = points3D[p3did]._replace(
                    image_ids=np.delete(obs_imgids, invalid_imgids),
                    point2D_idxs=np.delete(points3D[p3did].point2D_idxs,
                                           invalid_imgids))
        # Rebuild the image's point3D_ids, flagging invalid entries with -1.
        new_p3D_ids = p3D_ids.copy()
        sub_p3D_ids = new_p3D_ids[new_p3D_ids != -1]
        valids = np.ones(np.count_nonzero(new_p3D_ids != -1), dtype=bool)
        valids[~valids_projected] = False
        valids[valids_projected] = valids_backprojected
        sub_p3D_ids[~valids] = -1
        new_p3D_ids[new_p3D_ids != -1] = sub_p3D_ids
        img = img._replace(point3D_ids=new_p3D_ids)
        assert len(img.point3D_ids[img.point3D_ids != -1]) == len(scs), (
            f"{len(scs)}, {len(img.point3D_ids[img.point3D_ids != -1])}")
        # Move every remaining 3D point to its depth-derived coordinates.
        for i, p3did in enumerate(img.point3D_ids[img.point3D_ids != -1]):
            points3D[p3did] = points3D[p3did]._replace(xyz=scs[i])
        images[imgid] = img
    output_path.mkdir(parents=True, exist_ok=True)
    write_model(cameras, images, points3D, output_path)
if __name__ == '__main__':
    # Correct the SfM model of every 7Scenes scene using the rendered
    # ground-truth depth maps shipped with the dataset.
    dataset = Path('datasets/7scenes')
    outputs = Path('outputs/7Scenes')
    SCENES = ['chess', 'fire', 'heads', 'office', 'pumpkin',
              'redkitchen', 'stairs']
    for scene in SCENES:
        sfm_path = outputs / scene / 'sfm_superpoint+superglue'
        depth_path = dataset / f'depth/7scenes_{scene}/train/depth'
        output_path = outputs / scene / 'sfm_superpoint+superglue+depth'
        correct_sfm_with_gt_depth(sfm_path, depth_path, output_path)
| 5,227 | 39.527132 | 79 | py |
batch-bandits | batch-bandits-main/CMAB/offline_evaluator.py | from matplotlib import pyplot as plt
from torch.utils.data import Dataset
from basics.base_agent import BaseAgent
class OfflineEvaluator:
    """Replay-based offline evaluation of a bandit agent on logged data.

    Iterates over a logged (state, action, reward) dataset and, whenever the
    agent's policy agrees with the logged action, counts the reward
    (rejection-sampling replay evaluation).
    """

    def __init__(self, eval_info=None):
        if eval_info is None:
            eval_info = {}
        self.dataset = eval_info['dataset']
        self.agent = eval_info['agent']
        if not isinstance(self.dataset, Dataset):
            raise TypeError('dataset ' + "must be a " + str(Dataset))
        if not isinstance(self.agent, BaseAgent):
            raise TypeError('agent ' + "must be a " + str(BaseAgent))
        # Running statistics, initialized by eval_start().
        self.total_reward = None
        self.average_reward = None
        self.num_matches = None
        self.idxs = range(self.dataset.__len__())
        self.counter = None

    def eval_start(self):
        """Reset all running statistics before a fresh evaluation pass."""
        self.total_reward = 0
        self.average_reward = [0]
        self.num_matches = 0
        self.idxs = range(self.dataset.__len__())
        self.counter = 0

    def _get_observation(self):
        """Return the next logged (state, action, reward) tuple."""
        idx = self.idxs[self.counter]
        self.counter += 1
        return self.dataset.__getitem__(idx)

    def eval_step(self):
        """Process one logged interaction; update stats on a policy match."""
        state, logged_action, reward = self._get_observation()
        pred_action = self.agent.agent_policy(state)
        if logged_action != pred_action:
            return
        self.num_matches += 1
        # Incremental mean of the rewards over matched interactions.
        running = self.average_reward[-1]
        self.average_reward.append(
            running + (reward - running) / self.num_matches)
        self.total_reward += reward

    def eval_run(self):
        """Evaluate over the whole dataset; return the reward curve."""
        self.eval_start()
        while self.counter < self.dataset.__len__():
            self.eval_step()
        return self.average_reward
if __name__ == '__main__':
    # Example run: replay-evaluate a random agent on the mushroom bandit
    # dataset for a few seeds and plot the running average reward.
    import matplotlib.pyplot as plt
    from basics.random_agent import RandomAgent
    from utilities.dataloader import BanditDataset
    dir1 = 'C:/Users/provo501/Documents/GitHub/batch-bandits/experiments/data/mushroom_data_final.pickle'
    ra = RandomAgent()
    agent_info = {'num_actions': 2}
    ra.agent_init(agent_info)
    result = []
    result1 = []
    for seed_ in [1, 5, 10]: # , 2, 3, 32, 123, 76, 987, 2134]:
        dataset = BanditDataset(pickle_file=dir1, seed=seed_)
        eval_info = {'dataset': dataset, 'agent': ra}
        evaluator = OfflineEvaluator(eval_info)
        reward = evaluator.eval_run()
        # Keep both the full reward curve and the final total per seed.
        result.append(reward)
        result1.append(evaluator.total_reward)
    for elem in result:
        plt.plot(elem)
    plt.legend()
    plt.show()
| 2,575 | 25.833333 | 105 | py |
batch-bandits | batch-bandits-main/utilities/dataloader.py | import pickle
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from utilities.data_generator import generate_samples
def data_randomizer(pickle_file, seed=None):
    """Draw a shuffled, stratified subsample of a logged bandit dataset.

    `pickle_file` is either a path to a pickled DataFrame or the DataFrame
    itself. The last three columns are assumed to be (action, reward,
    propensity). For each action, a stratified 10% subsample (stratified on
    the reward column) is kept; rows are shuffled with `seed`. Returns
    (features, actions, propensity-weighted rewards).

    NOTE(review): the exact row order depends on the per-action seeded
    shuffle inside the loop — confirm before refactoring.
    """
    if isinstance(pickle_file, str):
        with open(pickle_file, 'rb') as f:
            dataset = pickle.load(f)
    else:
        dataset = pickle_file
    actions = sorted(dataset.iloc[:, -3].unique().tolist())
    # Empty frame with the same columns, filled action by action below.
    tst_smpl = pd.DataFrame().reindex_like(dataset).dropna()
    ratio = 0.1
    for action in actions:
        action_subsample = dataset[dataset.iloc[:, -3] == action]
        # train_test_split is used only to draw a stratified subsample; the
        # "train" part (action_drop) is discarded.
        action_drop, action_use = train_test_split(action_subsample.index, test_size=ratio,
                                                   random_state=seed,
                                                   stratify=action_subsample.iloc[:, -2])
        tst_smpl = pd.concat([tst_smpl,
                              action_subsample.loc[action_use]]).sample(frac=1, random_state=seed)
    tst_smpl = tst_smpl.reset_index(drop=True)
    del action_drop, action_use
    X = tst_smpl.iloc[:, :-3].to_numpy()
    A = tst_smpl.iloc[:, -3].to_numpy()
    Y = tst_smpl.iloc[:, -2].to_numpy()
    probs = tst_smpl.iloc[:, -1].to_numpy()
    # Importance-weighted rewards (inverse propensity scoring).
    return X, A, Y/probs
class BanditDataset(Dataset):
    """Torch dataset of logged bandit interactions.

    Each item is a (feature_vector, action, propensity-weighted reward)
    triple produced by `data_randomizer`.
    """

    def __init__(self, pickle_file, seed=None):
        # `pickle_file` may be a path or an already-loaded DataFrame.
        features, actions, rewards = data_randomizer(pickle_file, seed)
        self.features = features
        self.actions = actions
        self.rewards = rewards

    def __len__(self):
        return len(self.rewards)

    def __getitem__(self, idx):
        return self.features[idx], self.actions[idx], self.rewards[idx]
if __name__ == '__main__':
    # Smoke test: load a logged dataset and a synthetic one, and check
    # basic Dataset behavior (length, indexing, batching).
    # NOTE(review): `dir` shadows the builtin of the same name.
    dir = 'C:/Users/provo501/Documents/assignment/data/preprocessed_hidden_data.pickle'
    data = data_randomizer(dir)
    dataset = BanditDataset(pickle_file=dir, seed=1)
    print(len(dataset))
    print(dataset.__len__())
    print(dataset[420])
    print(dataset[421])
    print(dataset[0])
    print(dataset[1])
    dl = DataLoader(dataset, batch_size=2, shuffle=True)
    print(next(iter(dl)))
    # Same checks on synthetically generated samples.
    dataset = generate_samples(100000, 4, 3, True)
    dataset = BanditDataset(pickle_file=dataset, seed=1)
    print(len(dataset))
    print(dataset.__len__())
    print(dataset[420])
    print(dataset[421])
    print(dataset[0])
    print(dataset[1])
| 2,456 | 28.25 | 98 | py |
ecg-classification-quantized-cnn | ecg-classification-quantized-cnn-main/training.py | import torch
import torchvision
import torch.quantization
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pickle as pk
import pandas as pd
import wfdb
import pywt
#import h5py
import math
import os
import sys
import argparse
from pathlib import Path
import shutil
import copy
import time
import json
import itertools
import threading
from torch.quantization import QuantStub, DeQuantStub
from pathlib import Path
from torch.utils import data
class color:
    # ANSI escape sequences for styling terminal output.  Concatenate one
    # (or more) before the text and terminate with ``END`` to reset.
    NONE = ''              # no styling; placeholder so f-strings stay uniform
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'        # reset all attributes
# Command-line interface for a training session.
parser = argparse.ArgumentParser()
parser.add_argument('-n','--name', dest='name', required=True, help="session name")
parser.add_argument('-e','--epoch', dest='epoch', required=True, type=int, help="number of epochs")
parser.add_argument('-d','--dataset', dest='dataset', required=True, choices=['NLRAV', 'NSVFQ', 'NLRAVU', 'NSVFQU', 'NSV', 'SV'], help="choice of dataset between NLRAV or NSVFQ")
parser.add_argument('-s','--split', dest='split', default='0.7', help="choice of dataset splitting")
parser.add_argument('-o','--overwrite', dest='overwrite', action='store_true', help="overwrite the session if it already exists")
parser.add_argument('-b','--batchsize', dest='batchsize', default=32, type=int, help="batchsize value")
parser.add_argument('-a','--augmentation', dest='augmentation', nargs=2, type=int, default=[0,1], help='augmentation, number of lateral shifts and pitch (two arguments)')
parser.add_argument('-r','--randomseed', dest='randomseed', type=int, default=0, help='random seed for dataset randomization')
# BUG FIX: the long option string was misspelled '-.peak'; every other long
# option uses the conventional '--' prefix, so this is corrected to '--peak'.
# The short flag '-p' is unchanged, keeping existing invocations working.
parser.add_argument('-p','--peak', dest='peak', help='peak detector path')
parser.add_argument('--norm', dest='normalization', action='store_true', help="during training, scales all inputs so that its absolute value is equal to 1")
parser.add_argument('--indim', dest='indimension', default=198, type=int, help="input dimension")
parser.add_argument('--ksize', dest='ksize', default=7, type=int, help="kernel size")
parser.add_argument('--conv1of', dest='conv1of', default=20, type=int, help="conv 1 output features value")
parser.add_argument('--conv2of', dest='conv2of', default=20, type=int, help="conv 2 output features value")
parser.add_argument('--foutdim', dest='foutdim', default=100, type=int, help="fully connected 1 output dimension")
args = parser.parse_args()
def _make_session_dirs(base):
    # Create the session directory tree: base, inference example dump, parameters.
    Path(base).mkdir(parents=True, exist_ok=True)
    Path(base + 'inference_data_example').mkdir(parents=True, exist_ok=True)
    Path(base + 'parameters').mkdir(parents=True, exist_ok=True)

# Resolve and (re)create the output directory for this training session.
# The same directory layout was previously created in three duplicated
# try-blocks; it is now centralized in _make_session_dirs.
session_name = args.name
session_path = "output/train/"+session_name+"/"
if os.path.isdir(session_path):
    if args.overwrite:
        try:
            # --overwrite: wipe the old session and rebuild the tree.
            shutil.rmtree(session_path)
            _make_session_dirs(session_path)
        except OSError:
            print("Error in session creation ("+session_path+").")
            exit()
    else:
        # Without --overwrite an existing session is never touched.
        print(f'Session path ({session_path}) already exists')
        exit()
else:
    try:
        _make_session_dirs(session_path)
    except OSError:
        print("Error in session creation ("+session_path+").")
        exit()
print(f'{color.BOLD}Starting {color.NONE}training{color.END}{color.BOLD} session \'{session_name}\'\n\n\n{color.END}')
#██╗ ██╗ █████╗ ██████╗ ██╗ ██████╗ ██╗ ██╗███████╗
#██║ ██║██╔══██╗██╔══██╗██║██╔═══██╗██║ ██║██╔════╝
#██║ ██║███████║██████╔╝██║██║ ██║██║ ██║███████╗
#╚██╗ ██╔╝██╔══██║██╔══██╗██║██║ ██║██║ ██║╚════██║
# ╚████╔╝ ██║ ██║██║ ██║██║╚██████╔╝╚██████╔╝███████║
# ╚═══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝
# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def print_size_of_model(model):
    """Serialize the model's state_dict to a temporary file, print its
    on-disk size in megabytes, then delete the temporary file."""
    tmp_path = session_path + "temp.p"
    torch.save(model.state_dict(), tmp_path)
    print('Size model (MB):', os.path.getsize(tmp_path)/1e6)
    # BUG FIX: the original called os.remove('temp.p'), i.e. a path in the
    # current working directory, while the file was written under
    # session_path -- raising FileNotFoundError and leaking the temp file.
    os.remove(tmp_path)
def save_model(model, n):
    """Persist the model weights as 'model<n>.pth' under the session directory."""
    target = session_path + "model" + n + ".pth"
    torch.save(model.state_dict(), target)
# Shared flag polled by the `animate` spinner thread; set it True to stop the spinner.
t_done = False
def animate(prefix = ''):
    """Render a terminal spinner until the module-level `t_done` flag is set.

    Intended to run on a background thread; prints `prefix` followed by a
    rotating glyph on the same line, then '<prefix>Done!' once stopped.
    """
    frames = itertools.cycle(['|', '/', '-', '\\'])
    while not t_done:
        print('\r' + prefix + next(frames), end = '\r')
        time.sleep(0.2)
    print('\r' + prefix + 'Done!')
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """Draw a single-line terminal progress bar.

    Call repeatedly (e.g. once per loop iteration); the bar redraws in place
    via the carriage return in `printEnd`.  Once `iteration == total` a final
    line showing 'Done!' is emitted.

    iteration -- current iteration (int)
    total     -- total iterations (int)
    prefix    -- text before the bar
    suffix    -- text after the percentage
    decimals  -- decimal places of the percentage
    length    -- bar width in characters
    fill      -- character for the completed portion
    printEnd  -- line terminator (e.g. "\r", "\r\n")
    """
    done_chars = int(length * iteration // total)
    bar = fill * done_chars + '-' * (length - done_chars)
    percent = f'{100 * (iteration / float(total)):.{decimals}f}'
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
    if iteration == total:
        # Final redraw replacing the percentage with a completion marker.
        print(f'\r{prefix} |{bar}| Done! {suffix}')
# ██╗ ██╗██████╗ ███████╗███████╗ ██╗ ██████╗ █████╗ ██████╗ ███████╗██████╗
# ██║ ██║██╔══██╗██╔════╝██╔════╝ ██║ ██╔═══██╗██╔══██╗██╔══██╗██╔════╝██╔══██╗
# ███████║██║ ██║█████╗ ███████╗ ██║ ██║ ██║███████║██║ ██║█████╗ ██████╔╝
# ██╔══██║██║ ██║██╔══╝ ╚════██║ ██║ ██║ ██║██╔══██║██║ ██║██╔══╝ ██╔══██╗
# ██║ ██║██████╔╝██║ ███████║ ███████╗╚██████╔╝██║ ██║██████╔╝███████╗██║ ██║
# ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚══════╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚══════╝╚═╝ ╚═╝
# class HDF5Dataset(data.Dataset):
# """Represents an abstract HDF5 dataset.
#
# Input params:
# file_path: Path to the folder containing the dataset (one or multiple HDF5 files).
# recursive: If True, searches for h5 files in subdirectories.
# load_data: If True, loads all the data immediately into RAM. Use this if
# the dataset is fits into memory. Otherwise, leave this at false and
# the data will load lazily.
# data_cache_size: Number of HDF5 files that can be cached in the cache (default=3).
# transform: PyTorch transform to apply to every data instance (default=None).
# """
# def __init__(self, file_path, recursive, load_data, data_cache_size=3, transform=None):
# super().__init__()
# self.data_info = []
# self.data_cache = {}
# self.data_cache_size = data_cache_size
# self.transform = transform
#
# # Search for all h5 files
# p = Path(file_path)
# assert(p.is_dir())
# if recursive:
# files = sorted(p.glob('**/*.h5'))
# else:
# files = sorted(p.glob('*.h5'))
# if len(files) < 1:
# raise RuntimeError('No hdf5 datasets found')
#
# for h5dataset_fp in files:
# self._add_data_infos(str(h5dataset_fp.resolve()), load_data)
#
# def __getitem__(self, index):
# # get data
# x = self.get_data("X_data", index)
# # print(x)
# x = np.expand_dims(x,axis=1)
# if self.transform:
# x = self.transform(x)
# else:
# x = torch.from_numpy(x)
#
# # get label
# y = self.get_data("y_data", index)
# # print(y)
# # exit()
# y = torch.from_numpy(y)
# y=y.max(dim=1)[1]
# return (x, y)
#
# def __len__(self):
# return len(self.get_data_infos('X_data'))
#
# def _add_data_infos(self, file_path, load_data):
# with h5py.File(file_path, 'r') as h5_file:
# # Walk through all groups, extracting datasets
# for dname, ds in h5_file.items():
# # if data is not loaded its cache index is -1
# idx = -1
# if load_data:
# # add data to the data cache
# # idx = self._add_to_cache(ds.value, file_path)
# idx = self._add_to_cache(ds[()], file_path)
#
# # type is derived from the name of the dataset; we expect the dataset
# # name to have a name such as 'data' or 'label' to identify its type
# # we also store the shape of the data in case we need it
# # self.data_info.append({'file_path': file_path, 'type': dname, 'shape': ds.value.shape, 'cache_idx': idx})
# self.data_info.append({'file_path': file_path, 'type': dname, 'shape': ds[()].shape, 'cache_idx': idx})
#
# def _load_data(self, file_path):
# """Load data to the cache given the file
# path and update the cache index in the
# data_info structure.
# """
#
# with h5py.File(file_path, 'r') as h5_file:
# for gname, group in h5_file.items():
# for dname, ds in group.items():
# # add data to the data cache and retrieve
# # the cache index
# idx = self._add_to_cache(ds.value, file_path)
#
# # find the beginning index of the hdf5 file we are looking for
# file_idx = next(i for i,v in enumerate(self.data_info) if v['file_path'] == file_path)
#
# # the data info should have the same index since we loaded it in the same way
# self.data_info[file_idx + idx]['cache_idx'] = idx
#
# # remove an element from data cache if size was exceeded
# if len(self.data_cache) > self.data_cache_size:
# # remove one item from the cache at random
# removal_keys = list(self.data_cache)
# removal_keys.remove(file_path)
# self.data_cache.pop(removal_keys[0])
# # remove invalid cache_idx
# self.data_info = [{'file_path': di['file_path'], 'type': di['type'], 'shape': di['shape'], 'cache_idx': -1} if di['file_path'] == removal_keys[0] else di for di in self.data_info]
#
# def _add_to_cache(self, data, file_path):
# """Adds data to the cache and returns its index. There is one cache
# list for every file_path, containing all datasets in that file.
# """
# if file_path not in self.data_cache:
# self.data_cache[file_path] = [data]
# else:
# self.data_cache[file_path].append(data)
# return len(self.data_cache[file_path]) - 1
#
# def get_data_infos(self, type):
# """Get data infos belonging to a certain type of data.
# """
# data_info_type = [di for di in self.data_info if di['type'] == type]
# return data_info_type
#
# def get_data(self, type, i):
# """Call this function anytime you want to access a chunk of data from the
# dataset. This will make sure that the data is loaded in case it is
# not part of the data cache.
# """
# fp = self.get_data_infos(type)[i]['file_path']
# if fp not in self.data_cache:
# self._load_data(fp)
#
# # get new cache_idx assigned by _load_data_info
# cache_idx = self.get_data_infos(type)[i]['cache_idx']
# return self.data_cache[fp][cache_idx]
#██████╗ █████╗ ████████╗ █████╗ ███████╗███████╗████████╗
#██╔══██╗██╔══██╗╚══██╔══╝██╔══██╗██╔════╝██╔════╝╚══██╔══╝
#██║ ██║███████║ ██║ ███████║███████╗█████╗ ██║
#██║ ██║██╔══██║ ██║ ██╔══██║╚════██║██╔══╝ ██║
#██████╔╝██║ ██║ ██║ ██║ ██║███████║███████╗ ██║
#╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝ ╚═╝
# Dataset options taken from the CLI.
dataset_name = args.dataset          # which label scheme to build (see mapping below)
dataset_split = float(args.split)    # fraction of frames used for training
value_batch_size = args.batchsize
normalization = args.normalization   # per-frame max-abs scaling toggle
augmentation = args.augmentation     # [n_shifts, pitch] for lateral-shift augmentation
random_seed = args.randomseed        # seed for the dataset permutation
# dataset = HDF5Dataset('output/dataset/NLRAVpeaks_hdf5/', recursive=True, load_data=True, data_cache_size=1, transform=None)
#
# for x, y in dataset:
# t_dataset = torch.utils.data.TensorDataset(x,y)
#
# item_perm = torch.randperm(int(x.size(0)))
#
# x=x[item_perm]
# y=y[item_perm]
#
# X_train = x[:int(round(x.size(0)*value_dataset_split,0))]
# X_valid = x[int(round(x.size(0)*value_dataset_split,0)):]
# Y_train = y[0:int(round(x.size(0)*value_dataset_split,0))]
# Y_valid = y[int(round(x.size(0)*value_dataset_split,0)):]
#
# X_train.unsqueeze_(1)
# X_valid.unsqueeze_(1)
# if dataset_name == 'NLRAV' and args.split=='0.7':
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NLRAV/training_set_NLRAV_set_data.pickle", "rb") as input_file:
# X_train = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NLRAV/training_set_NLRAV_labels.pickle", "rb") as input_file:
# Y_train = pk.load(input_file)
#
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NLRAV/validation_set_NLRAV_set_data.pickle", "rb") as input_file:
# X_valid = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NLRAV/validation_set_NLRAV_labels.pickle", "rb") as input_file:
# Y_valid = pk.load(input_file)
#
# if dataset_name == 'NLRAV' and args.split=='0.8':
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NLRAV/training_set_NLRAV_set_data.pickle", "rb") as input_file:
# X_train = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NLRAV/training_set_NLRAV_labels.pickle", "rb") as input_file:
# Y_train = pk.load(input_file)
#
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NLRAV/validation_set_NLRAV_set_data.pickle", "rb") as input_file:
# X_valid = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NLRAV/validation_set_NLRAV_labels.pickle", "rb") as input_file:
# Y_valid = pk.load(input_file)
#
# if dataset_name == 'NSVFQ' and args.split=='0.7':
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NSVFQ/training_set_NSVFQ_set_data.pickle", "rb") as input_file:
# X_train = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NSVFQ/training_set_NSVFQ_labels.pickle", "rb") as input_file:
# Y_train = pk.load(input_file)
#
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NSVFQ/validation_set_NSVFQ_set_data.pickle", "rb") as input_file:
# X_valid = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_70_30_NSVFQ/validation_set_NSVFQ_labels.pickle", "rb") as input_file:
# Y_valid = pk.load(input_file)
#
# if dataset_name == 'NSVFQ' and args.split=='0.8':
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NSVFQ/training_set_NSVFQ_set_data.pickle", "rb") as input_file:
# X_train = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NSVFQ/training_set_NSVFQ_labels.pickle", "rb") as input_file:
# Y_train = pk.load(input_file)
#
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NSVFQ/validation_set_NSVFQ_set_data.pickle", "rb") as input_file:
# X_valid = pk.load(input_file)
# with open("./output/dataset/raw_peaks_aug/shuffled_80_20_NSVFQ/validation_set_NSVFQ_labels.pickle", "rb") as input_file:
# Y_valid = pk.load(input_file)
X = []   # beat frames: raw signal windows around each annotated beat
Y = []   # integer class index (position of the class name in `labels`)
C = []   # True only for the centred (j == 0, non-augmented) copy of a beat
R = []   # index of the source record within `data_names`
P = []   # beat positions; currently unused (the P bookkeeping below is commented out)
# MIT-BIH record identifiers to read.  Records '102' and '104' from the full
# 48-record set are excluded here (see the commented full list that this
# replaced in the original source).
data_names = ['100', '101', '103', '105', '106', '107',
              '108', '109', '111', '112', '113', '114', '115', '116',
              '117', '118', '119', '121', '122', '123', '124', '200',
              '201', '202', '203', '205', '207', '208', '209', '210',
              '212', '213', '214', '215', '217', '219', '220', '221',
              '222', '223', '228', '230', '231', '232', '233', '234']
# Class label sets and the mapping from raw annotation symbols to them.
# `labels` fixes the class order (list index == training target);
# `sub_labels` maps each annotation symbol onto one of those classes.
# Symbols absent from `sub_labels` are skipped while building the dataset
# (membership test in the extraction loop below).  The NSVFQ grouping
# appears to follow the usual AAMI-style superclasses -- TODO confirm.
if dataset_name == 'NLRAV':
    labels = ['N', 'L', 'R', 'A', 'V']
    sub_labels = {'N':'N', 'L':'L', 'R':'R', 'A':'A', 'V':'V'}
elif dataset_name == 'NSVFQ':
    labels = ['N', 'S', 'V', 'F', 'Q']
    sub_labels = { 'N':'N', 'L':'N', 'R':'N', 'e':'N', 'j':'N',
                   'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V',
                   'F':'F',
                   '/':'Q', 'f':'Q', 'Q':'Q'}
elif dataset_name == 'NSV':
    labels = ['N', 'S', 'V']
    sub_labels = { 'N':'N', 'L':'N', 'R':'N', 'e':'N', 'j':'N',
                   'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V'}
elif dataset_name == 'SV':
    labels = ['S', 'V']
    sub_labels = { 'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V'}
elif dataset_name == 'NLRAVU':
    labels = ['N', 'L', 'R', 'A', 'V', 'U']
    sub_labels = {'N':'N', 'L':'L', 'R':'R', 'A':'A', 'V':'V', 'U':'U'}
elif dataset_name == 'NSVFQU':
    labels = ['N', 'S', 'V', 'F', 'Q', 'U']
    sub_labels = { 'N':'N', 'L':'N', 'R':'N', 'e':'N', 'j':'N',
                   'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V',
                   'F':'F',
                   '/':'Q', 'f':'Q', 'Q':'Q',
                   'U':'U'}
# Half-width (in samples) of the window cut around each annotated beat:
# each frame is sig[pos-99 : pos+99] -> 198 samples, matching the --indim
# default of the network input dimension.
half_window = 99
# (Commented-out in the original: loading an external peak-detector output
#  used to synthesize 'U' (unknown) labels; kept disabled.)
# Build the frame dataset record by record from the raw MIT-BIH files.
printProgressBar(0, len(data_names), prefix = 'Dataset building:', suffix = '', length = 50)
for d in data_names:
    # Read the signal and its beat annotations ('atr') for this record.
    r = wfdb.rdrecord('./dataset/raw/'+d)
    ann = wfdb.rdann('./dataset/raw/'+d, 'atr', return_label_elements=['label_store', 'symbol'])
    # Record 114 uses the second signal channel; all others use the first.
    if d!='114':
        sig = np.array(r.p_signal[:,0])
        intsig = np.array(r.p_signal[:,0])
    else:
        sig = np.array(r.p_signal[:,1])
        intsig = np.array(r.p_signal[:,1])
    sig_len = len(sig)
    sym = ann.symbol    # per-beat annotation symbols
    pos = ann.sample    # per-beat sample positions
    beat_len = len(sym)
    for i in range(beat_len):
        # Augmentation: extract lateral-shifted copies of the window at
        # offsets j in [-n_shifts*pitch, +n_shifts*pitch] stepped by pitch;
        # with the default [0, 1] only the centred (j == 0) copy is taken.
        for j in range(-augmentation[0]*augmentation[1],augmentation[0]*augmentation[1]+1,augmentation[1]):
            # Keep the frame only if it fits inside the record and its
            # symbol maps to one of the selected classes.
            if pos[i]-half_window+j>=0 and pos[i]+half_window+j<=sig_len and sym[i] in sub_labels:
                frame = sig[pos[i]-half_window+j:pos[i]+half_window+j]
                X.append(frame)
                Y.append(labels.index(sub_labels[sym[i]]))
                C.append(True if j == 0 else False)
                R.append(data_names.index(d))
                # P.append(pos[i])
    printProgressBar(data_names.index(d) + 1, len(data_names), prefix = 'Dataset building:', suffix = '', length = 50)
# Show a spinner on a background thread while the loaders are assembled.
t_done = False
t_dict = {'prefix' : f'{color.NONE}Data loader{color.END}: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
# Deterministic shuffle of all frames via a seeded index permutation.
item_perm = np.arange(np.size(X,0))
np.random.seed(random_seed)
np.random.shuffle(item_perm)
X = np.array(X)[item_perm]
Y = np.array(Y)[item_perm]
C = np.array(C)[item_perm]
R = np.array(R)[item_perm]
# P = np.array(P)[item_perm]
# Train/validation split at the `dataset_split` fraction.
X_train = X[:round(np.size(X,0)*dataset_split)]
Y_train = Y[:round(np.size(X,0)*dataset_split)]
X_valid = X[round(np.size(X,0)*dataset_split):]
Y_valid = Y[round(np.size(X,0)*dataset_split):]
C_valid = C[round(np.size(X,0)*dataset_split):]
R_valid = R[round(np.size(X,0)*dataset_split):]
# Keep only the centred (non-shifted) copies in the validation set, so
# augmented duplicates never appear in validation.
X_valid = X_valid[C_valid]
Y_valid = Y_valid[C_valid]
if normalization:
    # Per-frame max-abs scaling so every frame lies in [-1, 1].
    for i in range(np.size(X_train,0)):
        X_train[i]=X_train[i]/np.max(np.absolute(X_train[i]))
    for i in range(np.size(X_valid,0)):
        X_valid[i]=X_valid[i]/np.max(np.absolute(X_valid[i]))
X_train = torch.from_numpy(X_train)
X_valid = torch.from_numpy(X_valid)
Y_train = torch.from_numpy(Y_train)
Y_valid = torch.from_numpy(Y_valid)
# Two unsqueezes give shape (N, 1, 1, frame_len), matching the network's
# (1, k) convolution kernels.
X_train.unsqueeze_(1)
X_train.unsqueeze_(1)
X_valid.unsqueeze_(1)
X_valid.unsqueeze_(1)
t_dataset_train = torch.utils.data.TensorDataset(X_train,Y_train)
t_dataset_valid = torch.utils.data.TensorDataset(X_valid,Y_valid)
# shuffle=False: the frames were already permuted above with a fixed seed.
loader_train = torch.utils.data.DataLoader(t_dataset_train, batch_size=value_batch_size, shuffle=False)
loader_valid = torch.utils.data.DataLoader(t_dataset_valid, batch_size=value_batch_size, shuffle=False)
t_done = True   # stop the spinner thread
time.sleep(0.2)
print('\n\n')
#███╗ ██╗███████╗████████╗██╗ ██╗ ██████╗ ██████╗ ██╗ ██╗
#████╗ ██║██╔════╝╚══██╔══╝██║ ██║██╔═══██╗██╔══██╗██║ ██╔╝
#██╔██╗ ██║█████╗ ██║ ██║ █╗ ██║██║ ██║██████╔╝█████╔╝
#██║╚██╗██║██╔══╝ ██║ ██║███╗██║██║ ██║██╔══██╗██╔═██╗
#██║ ╚████║███████╗ ██║ ╚███╔███╔╝╚██████╔╝██║ ██║██║ ██╗
#╚═╝ ╚═══╝╚══════╝ ╚═╝ ╚══╝╚══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝
# CNN hyper-parameters (all overridable from the CLI).
conv_indim = args.indimension   # input frame length (2 * half_window = 198 by default)
pool_ks = 2                     # max-pool kernel along the time axis
conv_1_if = 1                   # conv1 input channels (single signal channel)
conv_1_of = args.conv1of
conv_1_ks = args.ksize
conv_2_if = conv_1_of
conv_2_of = args.conv2of
conv_2_ks = args.ksize
# Flattened size after two valid (no-padding) convolutions, each followed by
# a /pool_ks max-pool: stage 1 shrinks by (conv_1_ks - 1), stage 2 by
# (conv_2_ks - 1).
# BUG FIX: the second stage used conv_1_ks instead of conv_2_ks.  Both come
# from --ksize today, so the value is unchanged, but the formula now stays
# correct if the two kernel sizes ever diverge.
fully_1_indim = int(conv_2_of * ((((conv_indim - (conv_1_ks - 1)) / pool_ks) - (conv_2_ks - 1)) / pool_ks))
fully_1_outdim = args.foutdim
fully_2_indim = fully_1_outdim
fully_2_outdim = len(labels)    # one output per class
class Net(nn.Module):
    """1-D CNN ECG beat classifier: two (conv -> relu6 -> maxpool) stages
    followed by two fully connected layers (all layers bias-free).

    Inputs are shaped (batch, 1, 1, conv_indim); convolutions and pools use
    (1, k) kernels so they act along the time axis only.

    Runtime switches (plain attributes, not constructor arguments):
      debug            -- dump every intermediate tensor of forward() to a
                          text file under the session directory.
      quantization     -- route activations through Quant/DeQuantStub so the
                          model can be statically quantized.
      quantization_inf -- track the running min/max of the conv1 output
                          (minoutput_0 / maxoutput_0).
    """
    def __init__(self):
        super(Net, self).__init__()
        self.relu6 = False          # NOTE(review): appears unused in this class
        self.debug = False
        self.quantization = False
        self.quantization_inf = False
        self.temp = 0               # suffix for the debug dump file name
        self.dic = {                # NOTE(review): never populated in the active code
            'item' : [],
            'labels' : []
        }
        self.minoutput_0 = 0        # running min of conv1 activations (quantization_inf)
        self.maxoutput_0 = 0        # running max of conv1 activations (quantization_inf)
        self.conv1 = nn.Conv2d(conv_1_if, conv_1_of, (1, conv_1_ks), bias=False)
        self.conv2 = nn.Conv2d(conv_2_if, conv_2_of, (1, conv_2_ks), bias=False)
        self.pool = nn.MaxPool2d((1, pool_ks))
        self.fc1 = nn.Linear(fully_1_indim, fully_1_outdim, bias=False)
        self.fc2 = nn.Linear(fully_2_indim, fully_2_outdim, bias=False)
        self.sm = nn.Softmax(dim=-1)    # defined, but the final softmax call is commented out
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        # When self.debug is set, every intermediate tensor is appended to
        # inference_data_example/input_<temp>.txt in the session directory.
        if(self.debug):
            torch.set_printoptions(threshold=500000, precision=10)
            f = open(session_path+"inference_data_example/input_"+str(self.temp)+".txt", "w")
            f.write("\n\ndequant\n")
            f.write(str(x))
        if(self.quantization):
            x = self.quant(x)
        if(self.debug):
            f.write("\n\nquant\n")
            f.write(str(x))
        x = self.conv1(x)
        # Record conv1 activation extremes (used when calibrating quantization ranges).
        if(self.quantization_inf):
            if(torch.min(x)<self.minoutput_0):
                self.minoutput_0 = torch.min(x)
            if(torch.max(x)>self.maxoutput_0):
                self.maxoutput_0 = torch.max(x)
        if(self.debug):
            f.write("\n\nconv1\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu1\n")
            f.write(str(x))
        x = self.pool(x)
        if(self.debug):
            f.write("\n\npool1\n")
            f.write(str(x))
        x = self.conv2(x)
        if(self.debug):
            f.write("\n\nconv2\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu2\n")
            f.write(str(x))
        x = self.pool(x)
        if(self.debug):
            f.write("\n\npool2\n")
            f.write(str(x))
        x = x.flatten(1)
        if(self.debug):
            f.write("\n\nflatten\n")
            f.write(str(x))
        x=self.fc1(x)
        if(self.debug):
            f.write("\n\nfc1\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu3\n")
            f.write(str(x))
        x = self.fc2(x)
        if(self.debug):
            f.write("\n\nfc2\n")
            f.write(str(x))
        if(self.quantization):
            x = self.dequant(x)
        if(self.debug):
            f.write("\n\ndequant\n\n")
            f.write(str(x))
            f.close()
        # Raw logits are returned; CrossEntropyLoss applies its own softmax.
        # x = self.sm(x)
        return x
    # Fuse Conv+BN and Conv+BN+Relu modules prior to quantization.
    # This operation does not change the numerics.
    def fuse_model(self):
        # ConvBNReLU / InvertedResidual are defined later in this file; this
        # method only touches them if such modules are present in the model.
        for m in self.modules():
            if type(m) == ConvBNReLU:
                torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True)
            if type(m) == InvertedResidual:
                for idx in range(len(m.conv)):
                    if type(m.conv[idx]) == nn.Conv2d:
                        torch.quantization.fuse_modules(m.conv, [str(idx), str(idx + 1)], inplace=True)
#████████╗██████╗ █████╗ ██╗███╗ ██╗██╗███╗ ██╗ ██████╗
#╚══██╔══╝██╔══██╗██╔══██╗██║████╗ ██║██║████╗ ██║██╔════╝
# ██║ ██████╔╝███████║██║██╔██╗ ██║██║██╔██╗ ██║██║ ███╗
# ██║ ██╔══██╗██╔══██║██║██║╚██╗██║██║██║╚██╗██║██║ ██║
# ██║ ██║ ██║██║ ██║██║██║ ╚████║██║██║ ╚████║╚██████╔╝
# ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝
num_trainepoch = args.epoch
num_trainepoch_effective = 0    # epochs fully completed (training can be Ctrl-C'd)
dim_batches = 25                # progress bar refresh period (in batches)
model = Net()
# optimizer = optim.Adam(model.parameters(), lr=0.0005)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-06, weight_decay=0)
criterion = nn.CrossEntropyLoss()
max_i = []                      # reused buffer of per-sample argmax predictions
# Per-epoch history; indexed through train_dic below.
train_data = [[] for i in range(5)]
# NOTE(review): 'learing_rate' is misspelled, but the key is used verbatim
# by the training loop below -- renaming it here alone would break that.
train_dic = {
    'train_loss' : 0,
    'valid_loss' : 1,
    'train_acc' : 2,
    'valid_acc' : 3,
    'learing_rate' : 4
}
epoch_loss = 0
epoch_acc = 0
cnt_allbatches = 0              # batches processed across all epochs (global bar)
tmp_cnt = 0                     # NOTE(review): appears unused below
tmp_cnt_t = 0                   # NOTE(review): appears unused below
frac = 33                       # NOTE(review): appears unused below
# Reserve console lines for the two stacked progress bars, then move the
# cursor back up ('\033[F' = cursor up one line).
print('\n\n\n\n\n', end = '')
printProgressBar(cnt_allbatches, len(loader_train)/dim_batches * num_trainepoch, prefix = f'{color.NONE}Training:{color.END}', suffix = '', length = 55)
print('\033[F\033[F\033[F\033[F\033[F', end = '')
# Main training loop.  The console output is deliberately quirky: two nested
# progress bars are kept on screen with ANSI cursor-up escapes ('\033[F').
# Ctrl-C is caught so a partially trained model is still saved afterwards.
try:
    for epoch in range(num_trainepoch):  # loop over the dataset multiple times
        cnt = 0        # correct predictions in the current phase
        cnt_t = 0      # total predictions in the current phase
        epoch_loss = 0
        running_loss = 0.0
        printProgressBar(0, len(loader_train), prefix = 'Epoch ' + str(epoch + 1) + '/' + str(num_trainepoch) + ':', suffix = ' ', length = 40)
        for i, data in enumerate(loader_train):
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs.float())
            loss = criterion(outputs, labels)
            # Arg-max over the raw logits for the running accuracy.
            list.clear(max_i)
            for o in outputs:
                m = max(o)
                indx = list(o).index(m)
                max_i.append(indx)
            for o, m in zip(labels, max_i):
                if o == m:
                    cnt = cnt + 1
                cnt_t = cnt_t + 1
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if i == len(loader_train) - 1:
                epoch_loss = epoch_loss/len(loader_train)
            running_loss += loss.item()
            if (i % dim_batches) == (dim_batches - 1):
                # Periodic refresh of the per-epoch and global bars.
                printProgressBar(i, len(loader_train) - 1, prefix = 'Session \''+session_name+'\', epoch ' + str(epoch + 1) + '/' + str(num_trainepoch) + ':', suffix = ' ', length = 25)
                if i == len(loader_train) - 1:
                    print('', end='\033[F')
                    print('\nLoss during training: %f\nAccuracy during training: %f\n\n' % (epoch_loss/i, (cnt/cnt_t)))
                printProgressBar(cnt_allbatches + i, len(loader_train) * num_trainepoch - 1, prefix = 'Training:', suffix = '', length = 55)
                if cnt_allbatches + i == (len(loader_train) * num_trainepoch) - 1:
                    print('', end='\033[F')
                print('\033[F\033[F\033[F\033[F\033[F\033[F')
                running_loss = 0.0
            elif i == (len(loader_train) - 1):
                # End of epoch falling between refresh periods.
                printProgressBar(i, len(loader_train) - 1, prefix = 'Session \''+session_name+'\', epoch ' + str(epoch + 1) + '/' + str(num_trainepoch) + ':', suffix = ' ', length = 25)
                print('', end='\033[F')
                print('\nLoss during training: %f\nAccuracy during training: %f\n\n' % (epoch_loss, (cnt/cnt_t)))
                printProgressBar(cnt_allbatches + i, len(loader_train) * num_trainepoch - 1, prefix = 'Training:', suffix = '', length = 55)
                if cnt_allbatches + i == (len(loader_train) * num_trainepoch) - 1:
                    print('', end='\033[F')
                print('', end='\033[F\033[F')
                running_loss = 0.0
        cnt_allbatches = cnt_allbatches + len(loader_train)
        # NOTE: 'learing_rate' is misspelled, but must match the key used
        # when train_dic was declared above.
        train_data[train_dic['learing_rate']].append(optimizer.param_groups[0]['lr'])
        train_data[train_dic['train_acc']].append(cnt/cnt_t)
        train_data[train_dic['train_loss']].append(epoch_loss)
        cnt = 0
        cnt_t = 0
        epoch_loss = 0
        # Validation pass, with a spinner on a background thread.
        t_done = False
        t_dict = {'prefix' : 'Accuracy on validation set: '}
        t = threading.Thread(target=animate, kwargs=t_dict)
        t.start()
        for i, data in enumerate(loader_valid):
            inputs, labels = data
            outputs = model(inputs.float())
            # BUG FIX: recompute the loss on this validation batch.  The
            # original accumulated the stale last *training* `loss` here, so
            # the recorded validation loss was meaningless.
            loss = criterion(outputs, labels)
            list.clear(max_i)
            for o in outputs:
                m = max(o)
                indx = list(o).index(m)
                max_i.append(indx)
            for o, m in zip(labels, max_i):
                if o == m:
                    cnt = cnt + 1
                cnt_t = cnt_t + 1
            epoch_loss += loss.item()
            if i == len(loader_valid) - 1:
                epoch_loss = epoch_loss/len(loader_valid)
        t_done = True
        time.sleep(0.2)
        train_data[train_dic['valid_acc']].append(cnt/cnt_t)
        train_data[train_dic['valid_loss']].append(epoch_loss)
        epoch_acc = cnt/cnt_t
        num_trainepoch_effective += 1
        print('\033[FAccuracy on validation set: %f\n' % epoch_acc)
        printProgressBar(cnt_allbatches, len(loader_train) * num_trainepoch, prefix = f'{color.NONE}Training{color.END}:', suffix = '', length = 55)
except KeyboardInterrupt:
    # Graceful stop: fall through and save whatever was trained so far.
    print('\n\n\n\n\n')
print('\n')
save_model(model,'')
# ██████╗ ██╗ ██╗ █████╗ ███╗ ██╗████████╗██╗███████╗ █████╗ ████████╗██╗ ██████╗ ███╗ ██╗
#██╔═══██╗██║ ██║██╔══██╗████╗ ██║╚══██╔══╝██║╚══███╔╝██╔══██╗╚══██╔══╝██║██╔═══██╗████╗ ██║
#██║ ██║██║ ██║███████║██╔██╗ ██║ ██║ ██║ ███╔╝ ███████║ ██║ ██║██║ ██║██╔██╗ ██║
#██║ █ ██║██║ ██║██╔══██║██║╚██╗██║ ██║ ██║ ███╔╝ ██╔══██║ ██║ ██║██║ ██║██║╚██╗██║
#╚██████╔╝╚██████╔╝██║ ██║██║ ╚████║ ██║ ██║███████╗██║ ██║ ██║ ██║╚██████╔╝██║ ╚████║
# ╚══█═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
# Setup warnings: silence DeprecationWarning globally, but keep the default
# behaviour for warnings raised from torch.quantization.
import warnings
warnings.filterwarnings(
    action='ignore',
    category=DeprecationWarning,
    module=r'.*'
)
warnings.filterwarnings(
    action='default',
    module=r'torch.quantization'
)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    """Conv2d -> BatchNorm2d -> ReLU stack with 'same'-style padding.

    ReLU (not ReLU6) is used here on purpose so the module can be fused for
    quantization; inplace=False keeps it fuse-friendly as well.
    """

    def __init__(self, in_planes, out_planes, kernel_size=7, stride=1, groups=1):
        pad = (kernel_size - 1) // 2
        stages = [
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes, momentum=0.1),
            nn.ReLU(inplace=False),
        ]
        super(ConvBNReLU, self).__init__(*stages)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block, written to be
    quantization-friendly (the skip addition goes through FloatFunctional
    instead of the '+' operator)."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # The identity skip is only valid when spatial dims and channels match.
        self.use_res_connect = stride == 1 and inp == oup
        stages = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # depthwise conv followed by a linear pointwise projection
        stages.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim))
        stages.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        stages.append(nn.BatchNorm2d(oup, momentum=0.1))
        self.conv = nn.Sequential(*stages)
        # FloatFunctional stands in for torch.add so the add can be quantized.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            return self.skip_add.add(x, out)
        return out
class AverageMeter(object):
    """Computes and stores the average and current value of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**vars(self))
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns a list of 1-element tensors, one per k, each holding the
    percentage of samples whose target appears in the top-k predictions.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        # hits[r, c] == True when prediction rank r for sample c is the target.
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        scores = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            scores.append(n_correct.mul_(100.0 / n_samples))
        return scores
def evaluate(model, criterion, data_loader, neval_batches):
    """Run `model` over at most `neval_batches` batches of `data_loader`.

    Used to calibrate the quantization observers; returns the running
    top-1 / top-n AverageMeters. `fully_2_outdim` (module global) is the
    number of output classes, used as the "n" of the top-n accuracy.
    """
    model.eval()
    top1 = AverageMeter('Acc@1', ':6.2f')
    topn = AverageMeter('Acc@5', ':6.2f')
    cnt = 0
    with torch.no_grad():
        printProgressBar(0, len(data_loader), prefix = 'Calibrate:', suffix = '', length = 40)
        for image, target in data_loader:
            output = model(image.float())
            # Loss value is unused; the forward pass alone feeds the observers.
            loss = criterion(output, target)
            cnt += 1
            acc1, accn = accuracy(output, target, topk=(1, fully_2_outdim))
            top1.update(acc1[0], image.size(0))
            topn.update(accn[0], image.size(0))
            printProgressBar(cnt, len(data_loader), prefix = 'Calibrate:', suffix = '', length = 40)
            if cnt >= neval_batches:
                return top1, topn
    # BUG FIX: previously returned the undefined name `top5` here, raising
    # NameError whenever the loader was shorter than neval_batches.
    return top1, topn
def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
    """Train `model` for at most `ntrain_batches` batches (one QAT epoch).

    Prints the running loss and top-1/top-5 accuracy when the batch budget
    is exhausted, or a final summary if the loader runs out first.
    """
    model.train()
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # BUG FIX: fmt must start with ':' so AverageMeter.__str__ builds a valid
    # format spec ('{val:1.5f}', not '{val1.5f}').
    avgloss = AverageMeter('Loss', ':1.5f')
    cnt = 0
    for image, target in data_loader:
        print('.', end = '')
        cnt += 1
        image, target = image.to(device), target.to(device)
        output = model(image)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        top1.update(acc1[0], image.size(0))
        top5.update(acc5[0], image.size(0))
        avgloss.update(loss, image.size(0))
        if cnt >= ntrain_batches:
            print('\nLoss', avgloss.avg)
            print('Training: * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
                  .format(top1=top1, top5=top5))
            return
    # BUG FIX: AverageMeter has no `global_avg` attribute; formatting with
    # {top1.global_avg} raised AttributeError when the loader was exhausted.
    print('Full imagenet train set: * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return
def convert_state_dict(src_dict): #1
    """Remap torchvision ResNet tensor names to conv_body/box_head names.

    'layerN.*' becomes 'res(N+1).*', the stem becomes 'res1.*', and the
    final fully-connected weights ('fc*') are dropped.
    """
    remapped = {}
    for key, tensor in src_dict.items():
        parts = key.split('.')
        if key.startswith('fc'):
            # Classifier head is not used by the detection model.
            continue
        if key.startswith('layer'):
            assert len(parts[0]) == 6
            stage = int(parts[0][5]) + 1
            remapped['.'.join(['res%d' % stage] + parts[1:])] = tensor
        else:
            remapped['.'.join(['res1'] + parts)] = tensor
    return remapped
def model_state_dict_parallel_convert(state_dict, mode): #2
    """Add or strip the DataParallel 'module.' prefix on state-dict keys.

    mode: 'to_single' strips the prefix, 'to_parallel' adds it,
    'same' returns the input unchanged; anything else raises.
    """
    from collections import OrderedDict
    if mode == 'same':
        return state_dict
    if mode == 'to_single':
        # Drop the leading 'module.' (7 characters) added by DataParallel.
        return OrderedDict((key[7:], value) for key, value in state_dict.items())
    if mode == 'to_parallel':
        return OrderedDict(('module.' + key, value) for key, value in state_dict.items())
    raise Exception('mode = to_single / to_parallel')
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor): #3
    """Recursively cast every tensor in a (possibly nested) state dict.

    Handles dicts, lists and tensors; any other value is returned as-is.
    Key order of dicts is preserved.
    """
    from collections import OrderedDict  # local import keeps the helper self-contained
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            # BUG FIX: propagate `ttype` into the recursion; previously nested
            # values were always cast with the default FloatTensor.
            cpu_dict[k] = convert_state_dict_type(v, ttype)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
# ██████╗ ██╗ ██╗ █████╗ ███╗ ██╗████████╗ ██████╗ ██████╗ ███████╗████████╗████████╗██████╗ █████╗ ██╗███╗ ██╗██╗███╗ ██╗ ██████╗
#██╔═══██╗██║ ██║██╔══██╗████╗ ██║╚══██╔══╝ ██╔══██╗██╔═══██╗██╔════╝╚══██╔══╝╚══██╔══╝██╔══██╗██╔══██╗██║████╗ ██║██║████╗ ██║██╔════╝
#██║ ██║██║ ██║███████║██╔██╗ ██║ ██║ ██████╔╝██║ ██║███████╗ ██║█████╗██║ ██████╔╝███████║██║██╔██╗ ██║██║██╔██╗ ██║██║ ███╗
#██║ █ ██║██║ ██║██╔══██║██║╚██╗██║ ██║ ██╔═══╝ ██║ ██║╚════██║ ██║╚════╝██║ ██╔══██╗██╔══██║██║██║╚██╗██║██║██║╚██╗██║██║ ██║
#╚██████╔╝╚██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ╚██████╔╝███████║ ██║ ██║ ██║ ██║██║ ██║██║██║ ╚████║██║██║ ╚████║╚██████╔╝
# ╚═══█═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝
# ---- Post-training static quantization (PTQ) of the trained model ----
model_quantized = copy.deepcopy(model)
model_quantized.quantization = True
eval_batch_size = value_batch_size
# NOTE(review): both batch counts are derived from X_train; the commented-out
# evaluation below runs on loader_valid, which presumably should size
# num_eval_batches from the validation set — confirm before re-enabling.
num_calibration_batches = math.ceil(X_train.size(0)/eval_batch_size)
num_eval_batches = math.ceil(X_train.size(0)/eval_batch_size)
model_quantized.eval()
## Fuse Conv, bn and relu
#model_quantized.fuse_model()
# Specify quantization configuration
# Start with simple min/max range estimation and per-tensor quantization of weights
model_quantized.qconfig = torch.quantization.get_default_qconfig('qnnpack') # 'fbgemm' 'qnnpack'
torch.backends.quantized.engine = 'qnnpack' # 'fbgemm' 'qnnpack'
# print(model_quantized.qconfig)
# model_quantized.qconfig.weight = torch.quantization.observer.MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_symmetric)
# print(model_quantized.qconfig.weight)
# exit()
# Insert observers so activation ranges are recorded during calibration.
torch.quantization.prepare(model_quantized, inplace=True)
print(f'\n{color.NONE}Post-training quantization{color.END}')
# Calibrate first
# print('Prepare: Inserting Observers')
#print('\n Inverted Residual Block:After observer insertion \n\n', model_quantized.features[1].conv)
# Calibrate with the training set (the loss/accuracy results are discarded;
# only the forward passes matter here).
evaluate(model_quantized, criterion, loader_train, neval_batches=num_calibration_batches)
# print('Post Training Quantization: Calibration done')
# Convert to quantized model; a spinner thread gives progress feedback.
t_done = False
t_dict = {'prefix' : 'Covert: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
torch.quantization.convert(model_quantized, inplace=True)
torch.set_printoptions(threshold=500000, precision=10) #, linehalf_windowth=20
# Print model's state_dict
# print()
#
# # Print optimizer's state_dict
# print("Optimizer's state_dict:")
# for var_name in optimizer.state_dict():
#     print(var_name, "\t", optimizer.state_dict()[var_name])
# print(model_quantized)
# exit()
# c_state_dict = convert_state_dict(model_quantized.state_dict())
# c_state_dict = model_state_dict_parallel_convert(model_quantized.state_dict(), 'to_parallel')
# c_state_dict = convert_state_dict_type(model_quantized.state_dict())
# print(model_quantized.state_dict())
# print(c_state_dict)
# print(json.dumps(model_quantized.state_dict(), indent = 4, ensure_ascii=False))
# print(json.dumps(c_state_dict, indent = 4, ensure_ascii=False))
# scripted = torch.jit.script(model_quantized)
# scripted.save("traced_resnet_model.pt")
# print(scripted.item())
# print('AAA')
# # print(scripted.make_dict())
t_done = True
time.sleep(0.2)
# print('Post Training Quantization: Convert done')
# top1, top5 = evaluate(model_quantized, criterion, loader_valid, neval_batches=num_eval_batches)
# print('\n\nEvaluation accuracy on %d samples, %.3f'%(num_eval_batches * eval_batch_size, top1.avg))
save_model(model_quantized, '_quantized')
# ---- Export the quantized weights as a C header (model_quantized.h) ----
# For each state-dict tensor: quantized tensors dump their int representation
# plus *_SCALE / *_ZERO_POINT defines; float tensors dump raw values.
# NOTE(review): this file handle is never closed — it is silently dropped when
# `f` is rebound to model_quantized.txt below; consider f.close() / `with`.
f = open(session_path+"model_quantized.h", "w")
for param_tensor in model_quantized.state_dict():
    try:
        temp_size = model_quantized.state_dict()[param_tensor].size()
    except:
        # Non-tensor entries (e.g. packed params without .size()) are skipped.
        continue
    if temp_size not in [torch.Size([]), torch.Size([1])]:
        # Multi-element tensor: emit a _DIM define and a C array initializer.
        first = True
        if model_quantized.state_dict()[param_tensor].dtype in [torch.qint8, torch.quint8]:
            temp_data = model_quantized.state_dict()[param_tensor].int_repr().numpy().flatten()
            f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}_SCALE {model_quantized.state_dict()[param_tensor].q_scale()}\n")
            f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}_ZERO_POINT {model_quantized.state_dict()[param_tensor].q_zero_point()}\n")
        else:
            temp_data = model_quantized.state_dict()[param_tensor].numpy().flatten()
        f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}_DIM {len(temp_data)}\n")
        f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}" + ' {')
        for i in temp_data: #.flatten('F')
            # Biases are written as integers, everything else verbatim.
            if 'bias' in param_tensor:
                if first:
                    first = False
                    f.write(f'{int(i)}')
                else:
                    f.write(f', {int(i)}')
            else:
                if first:
                    first = False
                    f.write(f'{i}')
                else:
                    f.write(f', {i}')
        f.write('}\n')
    else:
        # Scalar / single-element tensor: emit one plain define.
        if model_quantized.state_dict()[param_tensor].dtype in [torch.qint8, torch.quint8]:
            temp_data = model_quantized.state_dict()[param_tensor].int_repr().numpy().flatten()[0]
            f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}_SCALE {model_quantized.state_dict()[param_tensor].q_scale()}\n")
            f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()}_ZERO_POINT {model_quantized.state_dict()[param_tensor].q_zero_point()}\n")
        else:
            temp_data = model_quantized.state_dict()[param_tensor].numpy().flatten()[0]
        f.write(f"#define {str(param_tensor).replace('.', '_').replace('__', '_').upper()} {temp_data}\n")
    f.write('\n')
# print('')
# print(model_quantized.state_dict()['conv1.weight'].q_scale())
# print(model_quantized.state_dict()['conv1.scale'].numpy())
# print(model_quantized.state_dict()['quant.scale'].numpy())for param_tensor in model_quantized.state_dict():
# exit()
# ---- Human-readable dump of the same state dict (model_quantized.txt) ----
f = open(session_path+"model_quantized.txt", "w")
# print("Model's state_dict:")
for param_tensor in model_quantized.state_dict():
    try:
        f.write(f"{param_tensor}, {model_quantized.state_dict()[param_tensor].size()}\n")
        # print(param_tensor, ", ", model_quantized.state_dict()[param_tensor].size())
    except:
        f.write(f"{param_tensor}, Size error\n")
        # print(param_tensor, "Size error")
    f.write(str(model_quantized.state_dict()[param_tensor]))
    f.write('\n\n-----------\n\n')
    # print(model_quantized.state_dict()[param_tensor])
    # print('\n-----------\n')
f.close()
# # Print optimizer's state_dict
# print("Optimizer's state_dict:")
# for var_name in optimizer.state_dict():
# print(var_name, "\t", optimizer.state_dict()[var_name])
# exit()
# ██████╗ ██╗ ██╗ █████╗ ███╗ ██╗████████╗ █████╗ ██╗ ██╗ █████╗ ██████╗ ███████╗ ████████╗██████╗ █████╗ ██╗███╗ ██╗██╗███╗ ██╗ ██████╗
#██╔═══██╗██║ ██║██╔══██╗████╗ ██║╚══██╔══╝ ██╔══██╗██║ ██║██╔══██╗██╔══██╗██╔════╝ ╚══██╔══╝██╔══██╗██╔══██╗██║████╗ ██║██║████╗ ██║██╔════╝
#██║ ██║██║ ██║███████║██╔██╗ ██║ ██║ ███████║██║ █╗ ██║███████║██████╔╝█████╗█████╗██║ ██████╔╝███████║██║██╔██╗ ██║██║██╔██╗ ██║██║ ███╗
#██║ █ ██║██║ ██║██╔══██║██║╚██╗██║ ██║ ██╔══██║██║███╗██║██╔══██║██╔══██╗██╔══╝╚════╝██║ ██╔══██╗██╔══██║██║██║╚██╗██║██║██║╚██╗██║██║ ██║
#╚██████╔╝╚██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║╚███╔███╔╝██║ ██║██║ ██║███████╗ ██║ ██║ ██║██║ ██║██║██║ ╚████║██║██║ ╚████║╚██████╔╝
# ╚═══█═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝ ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝
#model_temp = model
#num_trainepoch_quant = 8
#optimizer = torch.optim.SGD(model_temp.parameters(), lr = 0.005)
#eval_batch_size = value_batch_size
#num_train_batches = math.ceil(round(x.size(0)*value_dataset_split,0)/eval_batch_size)
#num_eval_batches = math.ceil(round(x.size(0)*(1-value_dataset_split),0)/eval_batch_size)
#model_temp.fuse_model()
#model_temp.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack')
#torch.quantization.prepare_qat(model_temp, inplace=True)
## Train and check accuracy after each epoch
#for nepoch in range(num_trainepoch_quant):
# train_one_epoch(model_temp, criterion, optimizer, loader_train, torch.device('cpu'), num_train_batches)
# if nepoch > 3:
# # Freeze quantizer parameters
# model_temp.apply(torch.quantization.disable_observer)
# if nepoch > 2:
# # Freeze batch norm mean and variance estimates
# model_temp.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
# # Check the accuracy after each epoch
# model_quantized = torch.quantization.convert(model_temp.eval(), inplace=False)
# model_quantized.eval()
# top1, top5 = evaluate(model_quantized,criterion, loader_valid, neval_batches=num_eval_batches)
# print('\nEpoch %d :Evaluation accuracy on %d images, %2.2f\n'%(nepoch, num_eval_batches * eval_batch_size, top1.avg))
#███████╗██╗ ██╗ █████╗ ██╗ ██╗ ██╗ █████╗ ████████╗██╗ ██████╗ ███╗ ██╗
#██╔════╝██║ ██║██╔══██╗██║ ██║ ██║██╔══██╗╚══██╔══╝██║██╔═══██╗████╗ ██║
#█████╗ ██║ ██║███████║██║ ██║ ██║███████║ ██║ ██║██║ ██║██╔██╗ ██║
#██╔══╝ ╚██╗ ██╔╝██╔══██║██║ ██║ ██║██╔══██║ ██║ ██║██║ ██║██║╚██╗██║
#███████╗ ╚████╔╝ ██║ ██║███████╗╚██████╔╝██║ ██║ ██║ ██║╚██████╔╝██║ ╚████║
#╚══════╝ ╚═══╝ ╚═╝ ╚═╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
# ---- Validation-set evaluation of the floating-point model ----
# cmatrix[record][predicted][true]: one confusion matrix per MIT-BIH record.
cnt = 0
cnt_t = 0
cm_cnt = 0
cmatrix_temp = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
# cmatrix = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
# accmatrix = np.zeros(fully_2_outdim)
cmatrix = np.zeros((len(data_names), fully_2_outdim, fully_2_outdim), dtype=int)
# cmatrix = [[[[] for j in range(fully_2_outdim)] for k in range(fully_2_outdim)] for d in data_names]
print('\n\n')
printProgressBar(0, len(loader_valid), prefix = f'{color.NONE}Floating model evaluation{color.END}:', suffix = '', length = 40)
for i, data in enumerate(loader_valid):
    inputs, labels = data
    outputs = model(inputs.float())
    # max_i collects the argmax class per sample (presumably a list defined
    # earlier in the file — not visible in this chunk; verify).
    list.clear(max_i)
    for o in outputs:
        m=max(o)
        indx=list(o).index(m)
        max_i.append(indx)
    for idx, tgt in zip(max_i, labels):
        cmatrix[R_valid[cm_cnt]][idx][tgt] += 1
        cm_cnt += 1
    # for o, m in zip(labels, max_i):
    #     if o == m:
    #         cnt = cnt + 1
    #     cnt_t = cnt_t + 1
    printProgressBar(i + 1, len(loader_valid), prefix = f'{color.NONE}Floating model evaluation{color.END}:', suffix = '', length = 40)
# print('\nAccuracy on validation set with floating point model: %f' % (cnt/cnt_t))
#
# cnt = 0
# cnt_t = 0
# Overall accuracy = diagonal sum / total count over all per-record matrices.
for matrix in cmatrix:
    cmatrix_temp += matrix
    for i in range(fully_2_outdim):
        cnt += matrix[i][i]
        cnt_t += matrix.sum(axis=0)[i]
print('\nAccuracy on validation set with floating point model: %f' % (cnt/cnt_t))
print('\nConfusion matrix:')
print(cmatrix_temp)
# ---- Same evaluation for the quantized (fixed-point) model ----
cnt_q = 0
cnt_t_q = 0
cm_cnt = 0
cmatrix_temp = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
# cmatrix_q = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
# accmatrix_q = np.zeros(fully_2_outdim)
cmatrix_q = np.zeros((len(data_names), fully_2_outdim, fully_2_outdim), dtype=int)
# cmatrix_q = [[[[] for j in range(fully_2_outdim)] for k in range(fully_2_outdim)] for d in data_names]
print('\n\n')
printProgressBar(0, len(loader_valid), prefix = f'{color.NONE}Fixed model evaluation{color.END}:', suffix = '', length = 40)
for i, data in enumerate(loader_valid):
    inputs, labels = data
    outputs = model_quantized(inputs.float())
    list.clear(max_i)
    for o in outputs:
        m=max(o)
        indx=list(o).index(m)
        max_i.append(indx)
    for idx, tgt in zip(max_i, labels):
        cmatrix_q[R_valid[cm_cnt]][idx][tgt] += 1
        cm_cnt += 1
    # for o, m in zip(labels, max_i):
    #     if o == m:
    #         cnt_q = cnt_q + 1
    #     cnt_t_q = cnt_t_q + 1
    printProgressBar(i + 1, len(loader_valid), prefix = f'{color.NONE}Fixed model evaluation{color.END}:', suffix = '', length = 40)
for matrix in cmatrix_q:
    cmatrix_temp += matrix
    for i in range(fully_2_outdim):
        cnt_q += matrix[i][i]
        cnt_t_q += matrix.sum(axis=0)[i]
print('\nAccuracy on validation set with fixed point model: %f' % (cnt_q/cnt_t_q))
# for i in range(fully_2_outdim):
#     accmatrix_q[i] = (cmatrix_q[i][i]/cmatrix_q.sum(axis=0)[i])
print('\nConfusion matrix:')
print(cmatrix_temp)
# print('\nAccuracy per output:')
# print(accmatrix_q)
# ███████╗██╗ ██╗██████╗ ██████╗ ██████╗ ████████╗██╗███╗ ██╗ ██████╗ ██████╗ █████╗ ████████╗ █████╗
# ██╔════╝╚██╗██╔╝██╔══██╗██╔═══██╗██╔══██╗╚══██╔══╝██║████╗ ██║██╔════╝ ██╔══██╗██╔══██╗╚══██╔══╝██╔══██╗
# █████╗ ╚███╔╝ ██████╔╝██║ ██║██████╔╝ ██║ ██║██╔██╗ ██║██║ ███╗ ██║ ██║███████║ ██║ ███████║
# ██╔══╝ ██╔██╗ ██╔═══╝ ██║ ██║██╔══██╗ ██║ ██║██║╚██╗██║██║ ██║ ██║ ██║██╔══██║ ██║ ██╔══██║
# ███████╗██╔╝ ██╗██║ ╚██████╔╝██║ ██║ ██║ ██║██║ ╚████║╚██████╔╝ ██████╔╝██║ ██║ ██║ ██║ ██║
# ╚══════╝╚═╝ ╚═╝╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝
# ---- Export per-stage inference traces and the training summary ----
print('\n\n')
t_done = False
t_dict = {'prefix' : f'{color.NONE}Exporting data{color.END}: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
# With debug on, Net.forward dumps every intermediate activation to
# inference_data_example/input_<j>.txt; only the first batch is exported.
model_quantized.debug = True
for i, data in enumerate(loader_valid):
    inputs, labels = data
    if(model_quantized.debug):
        for j in range(0,value_batch_size):
            # `temp` selects the per-sample dump-file name inside forward().
            model_quantized.temp = j
            outputs = model_quantized(inputs[j].unsqueeze_(0).float())
    # model_quantized.dic['labels'] = list(labels)
    #
    # print('\nAAAAA\n')
    # print(model_quantized.dic)
    # print('\nBBBBB\n')
    # # print(json.dumps(model_quantized.dic, indent = 4, ensure_ascii=False))
    # # print('\nCCCCC\n')
    #
    # with open(session_path + 'inference_data_example/json_format.json', 'w') as json_file:
    #     json.dump(model_quantized.dic, json_file) #, indent = 4, ensure_ascii=False)
    torch.set_printoptions(threshold=500000, precision=10) #,linehalf_windowth=20
    f = open(session_path+"inference_data_example/labels.txt", "w")
    f.write(str(labels))
    f.close()
    break
# Summary of hyper-parameters and final metrics, persisted as JSON.
training_parameters = {
    'session_name': session_name,
    'dataset_name' : dataset_name,
    'dataset_split' : dataset_split,
    'augmentation' : False if augmentation == [0, 1] else augmentation,
    'random_seed' : random_seed,
    'batch_size' : value_batch_size,
    'normalization' : normalization,
    'conv_indim' : conv_indim,
    'pool_ks' : pool_ks,
    'conv_1_if' : conv_1_if,
    'conv_1_of' : conv_1_of,
    'conv_1_ks' : conv_1_ks,
    'conv_2_if' : conv_2_if,
    'conv_2_of' : conv_2_of,
    'conv_2_ks' : conv_2_ks,
    'fully_1_indim' : fully_1_indim,
    'fully_1_outdim' : fully_1_outdim,
    'fully_2_indim' : fully_2_indim,
    'fully_2_outdim' : fully_2_outdim,
    'train_epoch' : num_trainepoch_effective,
    'optimizer' : str(optimizer).replace("\n","").replace(" ",", "),
    'criterion' : str(criterion),
    'training_acc' : train_data[train_dic['train_acc']][-1],
    'validation_acc' : train_data[train_dic['valid_acc']][-1],
    'training_loss' : train_data[train_dic['train_loss']][-1],
    'validation_loss' : train_data[train_dic['valid_loss']][-1],
    # 'learing_rate' (sic) must match the key spelling used in train_dic.
    'learning_rate' : train_data[train_dic['learing_rate']][-1],
    'fixed_point_accuracy' : (cnt_q/cnt_t_q)
}
with open(session_path+'training_summary.json', 'w') as json_file:
    json.dump(training_parameters, json_file, indent=4)
# with open(session_path+'training_loss.pickle', 'wb') as output_file:
#     pk.dump(training_loss, output_file)
#
# with open(session_path+'training_acc.pickle', 'wb') as output_file:
#     pk.dump(training_acc, output_file)
with open(session_path+'training_data.pickle', 'wb') as output_file:
    pk.dump(train_data, output_file)
with open(session_path+'confusionmatrix_float.pickle', 'wb') as output_file:
    pk.dump(cmatrix, output_file)
with open(session_path+'confusionmatrix_fixed.pickle', 'wb') as output_file:
    pk.dump(cmatrix_q, output_file)
t_done = True
time.sleep(0.2)
print(f'\n{color.NONE}Summary{color.END}')
for par in training_parameters:
    print(repr(par),":",training_parameters[par])
print(f'{color.BOLD}\n\n\nEnding {color.NONE}training{color.END}{color.BOLD} session \'{session_name}\'{color.END}')
| 59,583 | 34.319502 | 287 | py |
ecg-classification-quantized-cnn | ecg-classification-quantized-cnn-main/tool_onnxgen.py | import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
import os
from pathlib import Path
import shutil
# Create (or interactively recreate) the ./output/net/ session directory.
# NOTE(review): answering anything other than "y" keeps the old directory but
# the script still continues and overwrites net.onnx below — confirm intent.
if os.path.isdir('./output/net/'):
    print("Session already exists (./output/net/), overwrite the session? (y/n): ", end='')
    force_write = input()
    print("")
    if force_write == "y":
        try:
            shutil.rmtree("./output/net/")
            Path("./output/net/").mkdir(parents=True, exist_ok=True)
        except OSError:
            print("Error in session creation (./output/net/).")
            exit()
else:
    try:
        Path("./output/net/").mkdir(parents=True, exist_ok=True)
    except OSError:
        print("Error in session creation (./output/net/).")
        exit()
class Net(nn.Module):
    """CNN exported to ONNX: two (1x7) conv layers with (1x2) max-pooling,
    followed by two fully-connected layers producing 5 class logits.

    Expects input of shape (batch, 1, 1, 198).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 18, (1, 7))
        self.conv2 = nn.Conv2d(18, 18, (1, 7))
        self.pool = nn.MaxPool2d((1, 2))
        # Two conv+pool stages shrink the 198-wide signal to 45 columns.
        self.fc1 = nn.Linear(18 * 45, 100)
        self.fc2 = nn.Linear(100, 5)

    def forward(self, x):
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.flatten(1)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
# Export an untrained instance of the network to ONNX; the fixed
# (1, 1, 1, 198) dummy input pins the graph's input shape.
net = Net()
dummy_input = torch.randn(1, 1, 1, 198)
torch.onnx.export(net,dummy_input,"output/net/net.onnx")
| 1,416 | 23.859649 | 91 | py |
ecg-classification-quantized-cnn | ecg-classification-quantized-cnn-main/evaluation.py | import torch
import torchvision
import torch.quantization
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pickle as pk
import pandas as pd
import wfdb
import math
import os
import sys
import argparse
from pathlib import Path
import shutil
import copy
import time
import json
import itertools
import threading
from torch.quantization import QuantStub, DeQuantStub
from pathlib import Path
from torch.utils import data
class color:
    """ANSI escape sequences used to style console output (END resets)."""
    NONE = ''
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
# ---- CLI arguments and evaluation-session directory setup ----
parser = argparse.ArgumentParser()
parser.add_argument('-n','--name', dest='name', required=True, help="session name")
parser.add_argument('-p','--peak', dest='peak', required=True, help="peak detection session path")
parser.add_argument('-t','--train', dest='train', required=True, help="training session path")
parser.add_argument('-o','--overwrite', dest='overwrite', action='store_true', help="overwrite the session if it already exists")
args = parser.parse_args()
session_name = args.name
session_path = "output/evaluation/"+session_name+"/"
# An existing session is only replaced when -o/--overwrite is given;
# otherwise the script aborts.
if os.path.isdir(session_path):
    if args.overwrite:
        try:
            shutil.rmtree(session_path)
            Path(session_path).mkdir(parents=True, exist_ok=True)
        except OSError:
            print("Error in session creation ("+session_path+").")
            exit()
    else:
        # print("Session already exists ("+session_path+"), overwrite the session? (y/n): ", end='')
        # force_write = input()
        # if force_write == "y":
        #     print('')
        #     try:
        #         shutil.rmtree(session_path)
        #         Path(session_path).mkdir(parents=True, exist_ok=True)
        #     except OSError:
        #         print("Error in session creation ("+session_path+").")
        #         exit()
        # else:
        #     exit()
        print(f'Session path ({session_path}) already exists')
        exit()
else:
    try:
        Path(session_path).mkdir(parents=True, exist_ok=True)
    except OSError:
        print("Error in session creation ("+session_path+").")
        exit()
print(f'{color.BOLD}Starting {color.NONE}evaluation{color.END}{color.BOLD} session \'{session_name}\'\n\n\n{color.END}')
session_train_path = args.train
if os.path.exists(session_train_path) == False:
    print(session_train_path+" does not exist!")
    exit()
session_peak_path = args.peak
if os.path.exists(session_peak_path) == False:
    print(session_peak_path+" does not exist!")
    exit()
# Hyper-parameters of the training session being evaluated.
# NOTE(review): json_file is never closed — consider a `with` block.
json_file = open(session_train_path+'/training_summary.json', 'r')
json_data = json.load(json_file)
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Zero out the last observation and the running statistics.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # `n` is the number of samples `val` was averaged over.
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        spec = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return spec.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns one (1,)-shaped percentage tensor per entry in `topk`.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: `correct` can be non-contiguous (it derives from a
            # transposed tensor), so `.view(-1)` raises RuntimeError on recent
            # PyTorch; `.reshape(-1)` copies only when needed. The training
            # script's copy of this helper already carries the same fix.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def evaluate(model, criterion, data_loader, neval_batches):
    """Run `model` over at most `neval_batches` batches of `data_loader`.

    Returns the top-1 and top-5 AverageMeters; used here to drive forward
    passes for post-training-quantization calibration (loss is computed but
    its value is unused).
    """
    model.eval()
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    cnt = 0
    with torch.no_grad():
        printProgressBar(0, len(data_loader), prefix = 'Post training:', suffix = '', length = 55)
        for image, target in data_loader:
            output = model(image.float())
            loss = criterion(output, target)
            cnt += 1
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            # print('.', end = '')
            top1.update(acc1[0], image.size(0))
            top5.update(acc5[0], image.size(0))
            printProgressBar(cnt, len(data_loader), prefix = 'Post training:', suffix = '', length = 55)
            if cnt >= neval_batches:
                return top1, top5
    return top1, top5
def find_nearest(array, value):
    """Return (element, index) of the entry in `array` nearest to `value`."""
    arr = np.asarray(array)
    idx = np.abs(arr - value).argmin()
    return arr[idx], idx
# Shared flag polled by the spinner thread; set True to stop it.
t_done = False
def animate(prefix = ''):
    """Console spinner; runs in a worker thread until the module-global
    `t_done` flag is set, then prints 'Done!'."""
    for c in itertools.cycle(['|', '/', '-', '\\']):
        if t_done:
            break
        print('\r' + prefix + c, end = '\r')
        # sys.stdout.write('\r' + prefix + c)
        # sys.stdout.flush()
        time.sleep(0.1)
    print('\r' + prefix + 'Done!')
    # sys.stdout.write('\r' + prefix + 'Done!')
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    done = int(length * iteration // total)
    percent = f'{100 * (iteration / float(total)):.{decimals}f}'
    bar = fill * done + '-' * (length - done)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
    if iteration == total:
        # Emit a newline once the bar is complete.
        print()
# Multi-class loss used by both the float and quantized evaluation passes.
criterion = nn.CrossEntropyLoss()
#██████╗ █████╗ ████████╗ █████╗ ███████╗███████╗████████╗
#██╔══██╗██╔══██╗╚══██╔══╝██╔══██╗██╔════╝██╔════╝╚══██╔══╝
#██║ ██║███████║ ██║ ███████║███████╗█████╗ ██║
#██║ ██║██╔══██║ ██║ ██╔══██║╚════██║██╔══╝ ██║
#██████╔╝██║ ██║ ██║ ██║ ██║███████║███████╗ ██║
#╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝ ╚═╝
# ---- Dataset configuration, restored from the training session summary ----
dataset_name = json_data['dataset_name']
dataset_split = float(json_data['dataset_split'])
value_dataset_split = json_data['dataset_split']
value_batch_size = json_data['batch_size']
normalization = json_data['normalization']
# [0, 1] encodes "no augmentation" (the summary stores False in that case).
augmentation = json_data['augmentation'] if json_data['augmentation'] else [0, 1]
random_seed = json_data['random_seed']
# Samples taken on each side of an annotated beat (window = 2 * 99 samples).
half_window = 99
X = []
Y = []
C = []
R = []
P = []
# MIT-BIH Arrhythmia Database record names.
data_names = ['100', '101', '102', '103', '104', '105', '106', '107',
              '108', '109', '111', '112', '113', '114', '115', '116',
              '117', '118', '119', '121', '122', '123', '124', '200',
              '201', '202', '203', '205', '207', '208', '209', '210',
              '212', '213', '214', '215', '217', '219', '220', '221',
              '222', '223', '228', '230', '231', '232', '233', '234']
# sub_labels maps each raw MIT-BIH annotation symbol onto the class set
# selected by dataset_name (NLRAV = raw symbols, NSVFQ = AAMI grouping).
if dataset_name == 'NLRAV':
    labels = ['N', 'L', 'R', 'A', 'V']
    sub_labels = {'N':'N', 'L':'L', 'R':'R', 'A':'A', 'V':'V'}
elif dataset_name == 'NSVFQ':
    labels = ['N', 'S', 'V', 'F', 'Q']
    sub_labels = { 'N':'N', 'L':'N', 'R':'N', 'e':'N', 'j':'N',
                   'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V',
                   'F':'F',
                   '/':'Q', 'f':'Q', 'Q':'Q'}
elif dataset_name == 'NLRAVU':
    labels = ['N', 'L', 'R', 'A', 'V', 'U']
    sub_labels = {'N':'N', 'L':'L', 'R':'R', 'A':'A', 'V':'V', 'U':'U'}
elif dataset_name == 'NSVFQU':
    labels = ['N', 'S', 'V', 'F', 'Q', 'U']
    sub_labels = { 'N':'N', 'L':'N', 'R':'N', 'e':'N', 'j':'N',
                   'A':'S', 'a':'S', 'J':'S', 'S':'S',
                   'V':'V', 'E':'V',
                   'F':'F',
                   '/':'Q', 'f':'Q', 'Q':'Q',
                   'U':'U'}
# if 'U' in labels:
#     peak_path = args.peak
#     with open(f'{peak_path}/matrix_l.pickle', 'rb') as input_file:
#         matrix_l = pk.load(input_file)
# ---- Build the beat dataset from the raw MIT-BIH records ----
printProgressBar(0, len(data_names), prefix = 'Dataset building:', suffix = '', length = 50)
for d in data_names:
    r = wfdb.rdrecord('./dataset/raw/'+d)
    ann = wfdb.rdann('./dataset/raw/'+d, 'atr', return_label_elements=['label_store', 'symbol'])
    # Channel 0 of the record signal.
    sig = np.array(r.p_signal[:,0])
    intsig = np.array(r.p_signal[:,0])
    sig_len = len(sig)
    sym = ann.symbol
    pos = ann.sample
    if 'U' in labels:
        # Synthesize 'U' (unknown) beats midway between annotated beats.
        # NOTE(review): the condition `pos[i] + pos[i+1] > 100` sums two
        # absolute sample positions — an inter-beat *gap* test would be
        # `pos[i+1] - pos[i]`; also pos[i+1] can index past the end for the
        # last i when sym_len % 4 == 1. Confirm intent.
        sym_len = len(sym)
        for i in range(0, sym_len, 4):
            if int(pos[i] + pos[i+1]) > 100:
                sym.append('U')
                pos = np.append(pos,[int((pos[i] + pos[i+1]) / 2)])
    # if 'U' in labels:
    #     print(d)
    #     for matrix, i in zip(matrix_l[data_names.index(d)], range(len(matrix_l[data_names.index(d)]))):
    #         if len(matrix) == 0:
    #             sym.append('U')
    #             pos = np.append(pos,pos[i])
    beat_len = len(sym)
    for i in range(beat_len):
        # j shifts the window for augmentation; j == 0 is the centred frame.
        for j in range(-augmentation[0]*augmentation[1],augmentation[0]*augmentation[1]+1,augmentation[1]):
            if pos[i]-half_window+j>=0 and pos[i]+half_window+j<=sig_len and sym[i] in sub_labels:
                frame = sig[pos[i]-half_window+j:pos[i]+half_window+j]
                X.append(frame)
                Y.append(labels.index(sub_labels[sym[i]]))
                # C marks the un-shifted (centre) copy of each beat.
                C.append(True if j == 0 else False)
                R.append(data_names.index(d))
                P.append(pos[i])
    printProgressBar(data_names.index(d) + 1, len(data_names), prefix = 'Dataset building:', suffix = '', length = 50)
# ---- Shuffle, split and wrap into DataLoaders ----
t_done = False
t_dict = {'prefix' : f'{color.NONE}Data loader{color.END}: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
# Single seeded permutation applied to all parallel arrays keeps them aligned.
item_perm = np.arange(np.size(X,0))
np.random.seed(random_seed)
np.random.shuffle(item_perm)
X = np.array(X)[item_perm]
Y = np.array(Y)[item_perm]
C = np.array(C)[item_perm]
R = np.array(R)[item_perm]
P = np.array(P)[item_perm]
X_train = X[:round(np.size(X,0)*dataset_split)]
Y_train = Y[:round(np.size(X,0)*dataset_split)]
# C_train = C[:round(np.size(X,0)*dataset_split)]
# R_train = R[:round(np.size(X,0)*dataset_split)]
# P_train = P[:round(np.size(X,0)*dataset_split)]
X_valid = X[round(np.size(X,0)*dataset_split):]
Y_valid = Y[round(np.size(X,0)*dataset_split):]
C_valid = C[round(np.size(X,0)*dataset_split):]
R_valid = R[round(np.size(X,0)*dataset_split):]
P_valid = P[round(np.size(X,0)*dataset_split):]
# Validation keeps only the centred (non-augmented) beats.
X_valid = X_valid[C_valid]
Y_valid = Y_valid[C_valid]
R_valid = R_valid[C_valid]
P_valid = P_valid[C_valid]
if normalization:
    # Per-frame normalization to the [-1, 1] range.
    for i in range(np.size(X_train,0)):
        X_train[i]=X_train[i]/np.max(np.absolute(X_train[i]))
    for i in range(np.size(X_valid,0)):
        X_valid[i]=X_valid[i]/np.max(np.absolute(X_valid[i]))
X_train = torch.from_numpy(X_train)
X_valid = torch.from_numpy(X_valid)
Y_train = torch.from_numpy(Y_train)
Y_valid = torch.from_numpy(Y_valid)
# Add singleton channel/height dims: (N, len) -> (N, 1, 1, len) for Conv2d.
X_train.unsqueeze_(1)
X_train.unsqueeze_(1)
X_valid.unsqueeze_(1)
X_valid.unsqueeze_(1)
t_dataset_train = torch.utils.data.TensorDataset(X_train,Y_train)
t_dataset_valid = torch.utils.data.TensorDataset(X_valid,Y_valid)
loader_train = torch.utils.data.DataLoader(t_dataset_train, batch_size=value_batch_size, shuffle=True)
loader_valid = torch.utils.data.DataLoader(t_dataset_valid, batch_size=value_batch_size, shuffle=True)
t_done = True
time.sleep(0.1)
print('\n')
#███╗ ██╗███████╗████████╗██╗ ██╗ ██████╗ ██████╗ ██╗ ██╗
#████╗ ██║██╔════╝╚══██╔══╝██║ ██║██╔═══██╗██╔══██╗██║ ██╔╝
#██╔██╗ ██║█████╗ ██║ ██║ █╗ ██║██║ ██║██████╔╝█████╔╝
#██║╚██╗██║██╔══╝ ██║ ██║███╗██║██║ ██║██╔══██╗██╔═██╗
#██║ ╚████║███████╗ ██║ ╚███╔███╔╝╚██████╔╝██║ ██║██║ ██╗
#╚═╝ ╚═══╝╚══════╝ ╚═╝ ╚══╝╚══╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝
# ---- Network hyper-parameters, restored from the training summary ----
# (if/of = in/out feature maps, ks = kernel size along the time axis)
conv_indim = json_data['conv_indim']
pool_ks = json_data['pool_ks']
conv_1_if = json_data['conv_1_if']
conv_1_of = json_data['conv_1_of']
conv_1_ks = json_data['conv_1_ks']
conv_2_if = json_data['conv_2_if']
conv_2_of = json_data['conv_2_of']
conv_2_ks = json_data['conv_2_ks']
fully_1_indim = json_data['fully_1_indim']
fully_1_outdim = json_data['fully_1_outdim']
fully_2_indim = json_data['fully_2_indim']
fully_2_outdim = json_data['fully_2_outdim']
class Net(nn.Module):
    """ECG beat classifier: two (1 x ks) bias-free conv layers with
    max-pooling, then two bias-free fully-connected layers.

    Flags (set from outside after construction):
      debug            - dump every intermediate activation to a text file
                         under session_train_path/inference_data_example/.
      quantization     - route input/output through Quant/DeQuantStub.
      quantization_inf - track the min/max of conv1's output.
      temp             - sample index used to name the per-sample debug file.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.relu6 = False
        self.debug = False
        self.quantization = False
        self.quantization_inf = False
        self.temp = 0
        # Running min/max of conv1 activations (see quantization_inf).
        self.minoutput_0 = 0
        self.maxoutput_0 = 0
        self.conv1 = nn.Conv2d(conv_1_if, conv_1_of, (1, conv_1_ks), bias=False)
        self.conv2 = nn.Conv2d(conv_2_if, conv_2_of, (1, conv_2_ks), bias=False)
        self.pool = nn.MaxPool2d((1, pool_ks))
        self.fc1 = nn.Linear(fully_1_indim, fully_1_outdim, bias=False)
        self.fc2 = nn.Linear(fully_2_indim, fully_2_outdim, bias=False)
        # Defined but not applied in forward (the final softmax is commented out).
        self.sm = nn.Softmax(dim=-1)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        """Forward pass; with self.debug set, every stage's output is
        appended to a per-sample dump file (used to cross-check the C
        implementation of the network)."""
        if(self.debug):
            torch.set_printoptions(threshold=500000, precision=10) #, linewidth=20
            f = open(session_train_path+"/inference_data_example/input_"+str(self.temp)+".txt", "w")
            f.write("\n\ndequant\n")
            f.write(str(x))
        if(self.quantization):
            x = self.quant(x)
            if(self.debug):
                f.write("\n\nquant\n")
                f.write(str(x))
        x = self.conv1(x)
        if(self.quantization_inf):
            if(torch.min(x)<self.minoutput_0):
                self.minoutput_0 = torch.min(x)
            if(torch.max(x)>self.maxoutput_0):
                self.maxoutput_0 = torch.max(x)
        if(self.debug):
            f.write("\n\nconv1\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu1\n")
            f.write(str(x))
        x = self.pool(x)
        if(self.debug):
            f.write("\n\npool1\n")
            f.write(str(x))
        x = self.conv2(x)
        if(self.debug):
            f.write("\n\nconv2\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu2\n")
            f.write(str(x))
        x = self.pool(x)
        if(self.debug):
            f.write("\n\npool2\n")
            f.write(str(x))
        x = x.flatten(1)
        if(self.debug):
            f.write("\n\nflatten\n")
            f.write(str(x))
        x=self.fc1(x)
        if(self.debug):
            f.write("\n\nfc1\n")
            f.write(str(x))
        x = F.relu6(x)
        if(self.debug):
            f.write("\n\nrelu3\n")
            f.write(str(x))
        x = self.fc2(x)
        if(self.debug):
            f.write("\n\nfc2\n")
            f.write(str(x))
        if(self.quantization):
            x = self.dequant(x)
            if(self.debug):
                f.write("\n\ndequant\n\n")
                f.write(str(x))
                f.close()
        # x = self.sm(x)
        return x
    # Fuse Conv+BN and Conv+BN+Relu modules prior to quantization
    # This operation does not change the numerics
    # NOTE(review): ConvBNReLU / InvertedResidual are not defined in this
    # file — calling fuse_model here would raise NameError; it appears to be
    # dead code copied from the training script.
    def fuse_model(self):
        for m in self.modules():
            if type(m) == ConvBNReLU:
                torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True)
            if type(m) == InvertedResidual:
                for idx in range(len(m.conv)):
                    if type(m.conv[idx]) == nn.Conv2d:
                        torch.quantization.fuse_modules(m.conv, [str(idx), str(idx + 1)], inplace=True)
# ██╗      ██████╗  █████╗ ██████╗     ███╗   ███╗ ██████╗ ██████╗ ███████╗██╗
# ██║     ██╔═══██╗██╔══██╗██╔══██╗    ████╗ ████║██╔═══██╗██╔══██╗██╔════╝██║
# ██║     ██║   ██║███████║██║  ██║    ██╔████╔██║██║   ██║██║  ██║█████╗  ██║
# ██║     ██║   ██║██╔══██║██║  ██║    ██║╚██╔╝██║██║   ██║██║  ██║██╔══╝  ██║
# ███████╗╚██████╔╝██║  ██║██████╔╝    ██║ ╚═╝ ██║╚██████╔╝██████╔╝███████╗███████╗
# ╚══════╝ ╚═════╝ ╚═╝  ╚═╝╚═════╝     ╚═╝     ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝
# Load the float (reference) model trained in a previous session.
model = Net()
model.load_state_dict(torch.load(session_train_path+'/model.pth'))
model.eval()
# Quantize a deep copy so `model` stays in float precision for comparison.
model_quantized = copy.deepcopy(model)
model_quantized.quantization = True
eval_batch_size = value_batch_size
num_calibration_batches = math.ceil(X_train.size(0)/eval_batch_size)
num_eval_batches = math.ceil(X_train.size(0)/eval_batch_size)
print(f'\n{color.NONE}Post-training quantization{color.END}')
model_quantized.eval()
# Static post-training quantization with the qnnpack (ARM/mobile) backend.
model_quantized.qconfig = torch.quantization.get_default_qconfig('qnnpack')
torch.backends.quantized.engine = 'qnnpack'
torch.quantization.prepare(model_quantized, inplace=True)
# Calibration pass: run the training set through the observer-instrumented model.
evaluate(model_quantized, criterion, loader_train, neval_batches=num_calibration_batches)
t_done = False
# NOTE(review): 'Covert' is likely a typo for 'Convert' in the spinner prefix.
t_dict = {'prefix' : 'Covert: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
torch.quantization.convert(model_quantized, inplace=True)
t_done = True
time.sleep(0.1)
# # ██████╗ ██████╗ ██████╗
# # ██╔══██╗██╔═══██╗██╔════╝
# # ██████╔╝██║ ██║██║
# # ██╔══██╗██║ ██║██║
# # ██║ ██║╚██████╔╝╚██████╗
# # ╚═╝ ╚═╝ ╚═════╝ ╚═════╝
#
# from scipy import interp
# from sklearn.metrics import confusion_matrix, roc_curve, auc
# from matplotlib import pyplot as plt
#
# def str2bool(s):
# if s == "True":
# return True
# elif s == "False":
# return False
# else:
# raise NotImplementedError
#
# def get_output(model, loader, with_prob=True):
# y_pred, y_true, = [], []
# if with_prob:
# y_prob = []
# else:
# y_prob = None
# for inputs, labels in loader:
# # if torch.cuda.is_available():
# # inputs = inputs.cuda()
# # labels = labels.cuda()
# outputs = model(inputs.float())
# _, preds = torch.max(outputs, 1)
# if with_prob:
# probs = torch.nn.functional.softmax(outputs, dim=1)
# else:
# probs = None
# y_pred.append(preds.cpu().numpy())
# y_true.append(labels.cpu().numpy())
# if with_prob:
# y_prob.append(probs.detach().cpu().numpy())
# y_pred = np.concatenate(y_pred)
# y_true = np.concatenate(y_true)
# if with_prob:
# y_prob = np.concatenate(y_prob)
# return y_pred, y_true, y_prob
#
# def print_roc_curve(y_test, y_score, n_classes, figsize = (8, 6)):
# lw = 1
# # Compute ROC curve and ROC area for each class
# fpr = dict()
# tpr = dict()
# roc_auc = dict()
# for i in range(n_classes):
# fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
# roc_auc[i] = auc(fpr[i], tpr[i])
# # Compute micro-average ROC curve and ROC area
# fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
# roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# # First aggregate all false positive rates
# all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# # Then interpolate all ROC curves at this points
# mean_tpr = np.zeros_like(all_fpr)
# for i in range(n_classes):
# mean_tpr += interp(all_fpr, fpr[i], tpr[i])
#
# # Finally average it and compute AUC
# mean_tpr /= n_classes
#
# fpr["macro"] = all_fpr
# tpr["macro"] = mean_tpr
# fpr["macro"] = np.insert(fpr["macro"],0,0)
# tpr["macro"] = np.insert(tpr["macro"],0,0)
# print(fpr["macro"])
# print(tpr["macro"])
# roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# fig = plt.figure(figsize=figsize)
# """
# plt.plot(fpr["micro"], tpr["micro"],
# label='micro-average ROC curve (area = {0:0.5f})'
# ''.format(roc_auc["micro"]),
# color='deeppink', linestyle=':', linewidth=4)
# """
# plt.plot(fpr["macro"], tpr["macro"],
# # label='macro-average ROC curve (area = {0:0.5f})'
# label='ROC curve (AUC = {0:0.5f})'
# ''.format(roc_auc["macro"])
# # ,color='navy'#, linestyle=':', linewidth=4
# )
# plt.plot([0, 1], [0, 1], 'k--', lw=lw)
# plt.xlim([-0.003, 1.0])
# plt.ylim([0.0, 1.003])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# #plt.title('Some extension of Receiver operating characteristic to multi-class')
# plt.legend(loc="lower right")
# plt.show()
# return fig
#
# # obtain outputs of the model
#
# # model = torch.load(args.ckpt)
#
# # if args.multilabel:
# # alloc_label = multi_label
# # else:
# # alloc_label = binary_label
#
# alloc_label = True
#
# # test_dataset = EarDataset(binary_dir=args.data_dir,
# # alloc_label = alloc_label,
# # transforms=transforms.Compose([Rescale((256, 256)), ToTensor(), Normalize()]))
# # test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=8)
# test_loader = loader_valid
# y_pred, y_true, y_score = get_output(model_quantized, test_loader)
# print(y_pred.shape, y_true.shape, y_score.shape)
#
# # exit()
#
# # # save the confusion matrix
# # with open(args.labels, 'r+') as f:
# # labels = f.readlines()
# # labels = [l.replace('\n', '') for l in labels]
# # if not args.multilabel:
# # labels = ['Normal', 'Abnormal']
# # if not os.path.exists(args.result_dir):
# # os.mkdir(args.result_dir)
# # cnf_matrix = confusion_matrix(y_true, y_pred, labels=np.arange(len(labels)))
# # np.set_printoptions(precision=2)
# # fig = print_confusion_matrix(cnf_matrix, labels, figsize=(16,14), fontsize=10)
# # fig.savefig(os.path.join(args.result_dir, args.cfmatrix_name))
#
# # save the roc curve
# y_onehot = np.zeros((y_true.shape[0], len(labels)), dtype=np.uint8)
# y_onehot[np.arange(y_true.shape[0]), y_true] = 1
# sums = y_onehot.sum(axis=0)
# useless_cols = []
# for i, c in enumerate(sums):
# if c == 0:
# print('useless column {}'.format(i))
# useless_cols.append(i)
# useful_cols = np.array([i for i in range(len(labels)) if i not in useless_cols])
# # if args.multilabel:
# y_onehot = y_onehot[:,useful_cols]
# y_score = y_score[:,useful_cols]
# fig = print_roc_curve(y_onehot, y_score, useful_cols.shape[0], figsize=(8,6))
# # fig.savefig(os.path.join(args.result_dir, args.roc_name))
# fig.savefig(os.path.join("output/temp", "roc"))
# # pltfig = fig.add_subplot(1, 1, 1)
# # plt.plot(pltfig)
# # plt.show()
#
# exit()
# ██████╗ ███████╗ █████╗ ██╗  ██╗    ███████╗██╗   ██╗ █████╗ ██╗     ██╗   ██╗ █████╗ ████████╗██╗ ██████╗ ███╗   ██╗
# ██╔══██╗██╔════╝██╔══██╗██║ ██╔╝    ██╔════╝██║   ██║██╔══██╗██║     ██║   ██║██╔══██╗╚══██╔══╝██║██╔═══██╗████╗  ██║
# ██████╔╝█████╗  ███████║█████╔╝     █████╗  ██║   ██║███████║██║     ██║   ██║███████║   ██║   ██║██║   ██║██╔██╗ ██║
# ██╔═══╝ ██╔══╝  ██╔══██║██╔═██╗     ██╔══╝  ╚██╗ ██╔╝██╔══██║██║     ██║   ██║██╔══██║   ██║   ██║██║   ██║██║╚██╗██║
# ██║     ███████╗██║  ██║██║  ██╗    ███████╗ ╚████╔╝ ██║  ██║███████╗╚██████╔╝██║ ╚═╝ ██║ ╚═╝ ██║╚██████╔╝██║  ╚████║
# ╚═╝     ╚══════╝╚═╝  ╚═╝╚═╝  ╚═╝    ╚══════╝  ╚═══╝  ╚═╝  ╚═╝╚══════╝ ╚═════╝ ╚═╝     ╚═╝     ╚═╝ ╚═════╝ ╚═╝   ╚═══╝
# Half-window (in samples) extracted around each peak; model inputs are 2*wid wide.
wid = 99
# Load peak positions and true/false-positive bookkeeping produced by the
# peak-detection session.
with open(session_peak_path+'/peakpos.pickle', "rb") as output_file:
    PEAKPOS = pk.load(output_file)
with open(f'{session_peak_path}/matrix_l.pickle', 'rb') as input_file:
    matrix_l = pk.load(input_file)
with open(f'{session_peak_path}/matrix_p.pickle', 'rb') as input_file:
    matrix_p = pk.load(input_file)
with open(f'{session_peak_path}/fp_pos.pickle', 'rb') as input_file:
    fp_pos = pk.load(input_file)
# Per-record confusion matrices: float vs. quantized model, window anchored on
# the detected peak vs. centered on the annotated position.
# NOTE(review): `max_i`, `X_valid_filtered` and `tp_cmatrix` appear unused below.
max_i = []
valid_filter = []
X_valid_filtered = []
Y_valid_filtered = []
P_valid_filtered = []
tp_cmatrix_float = []
tp_cmatrix_float_centered = []
tp_cmatrix_fixed = []
tp_cmatrix_fixed_centered = []
tp_cmatrix = np.zeros((fully_2_outdim, fully_2_outdim), dtype=int)
fp_vector = []
# printProgressBar(0, len(data_names), prefix = 'Files analyzed:', suffix = '', length = 55)
# For every record: classify each peak window with both the float and the
# quantized model, accumulating one confusion matrix per model/window variant.
# NOTE(review): the four classification blocks below are near-identical and
# could be factored into a helper; kept verbatim here, documentation only.
for d in data_names:
    # One empty len(labels) x len(labels) confusion matrix per record/variant.
    tp_cmatrix_float.append([[(0) for i in range(len(labels))] for i in range(len(labels))])
    tp_cmatrix_float_centered.append([[(0) for i in range(len(labels))] for i in range(len(labels))])
    tp_cmatrix_fixed.append([[(0) for i in range(len(labels))] for i in range(len(labels))])
    tp_cmatrix_fixed_centered.append([[(0) for i in range(len(labels))] for i in range(len(labels))])
    fp_vector.append([])
    # Raw signal (lead 0) and annotations for this record.
    r = wfdb.rdrecord('./dataset/raw/'+d)
    ann = wfdb.rdann('./dataset/raw/'+d, 'atr', return_label_elements=['label_store', 'symbol'])
    sig = np.array(r.p_signal[:,0])
    sig_len = len(sig)
    sym = ann.symbol
    pos = ann.sample
    # Rebuild the boolean mask selecting validation samples of this record.
    # NOTE(review): the loop variable `r` shadows the wfdb record above (the
    # record is no longer needed at this point, but the reuse is confusing).
    list.clear(valid_filter)
    for r in R_valid:
        if r == data_names.index(d):
            valid_filter.append(True)
        else:
            valid_filter.append(False)
    P_valid_filtered = P_valid[valid_filter]
    Y_valid_filtered = Y_valid[valid_filter]
    # `couple` pairs an annotated position (couple[0]) with a detected peak
    # position (couple[1]) — presumably; confirm against the peak session.
    for couple in matrix_l[data_names.index(d)]:
        if len(couple):
            if couple[0] in P_valid_filtered:
                # Float model, window anchored on the detected peak.
                if (couple[1] - wid >= 0) and (couple[1] + wid < sig_len):
                    input = torch.from_numpy(sig[couple[1]-wid:couple[1]+wid])
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    output = model(input.float())
                    m=max(output[0])
                    indx=list(output[0]).index(m)
                    tp_cmatrix_float[data_names.index(d)][int(Y_valid_filtered[np.where(P_valid_filtered == couple[0])[0][0]])][indx] += 1
                # Float model, window centered on the annotated position.
                if (couple[0] - wid >= 0) and (couple[0] + wid < sig_len):
                    input = torch.from_numpy(sig[couple[0]-wid:couple[0]+wid])
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    output = model(input.float())
                    m=max(output[0])
                    indx=list(output[0]).index(m)
                    tp_cmatrix_float_centered[data_names.index(d)][int(Y_valid_filtered[np.where(P_valid_filtered == couple[0])[0][0]])][indx] += 1
                # Quantized model, window anchored on the detected peak.
                if (couple[1] - wid >= 0) and (couple[1] + wid < sig_len):
                    input = torch.from_numpy(sig[couple[1]-wid:couple[1]+wid])
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    output = model_quantized(input.float())
                    m=max(output[0])
                    indx=list(output[0]).index(m)
                    tp_cmatrix_fixed[data_names.index(d)][int(Y_valid_filtered[np.where(P_valid_filtered == couple[0])[0][0]])][indx] += 1
                # Quantized model, window centered on the annotated position.
                if (couple[0] - wid >= 0) and (couple[0] + wid < sig_len):
                    input = torch.from_numpy(sig[couple[0]-wid:couple[0]+wid])
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    input.unsqueeze_(0)
                    output = model_quantized(input.float())
                    m=max(output[0])
                    indx=list(output[0]).index(m)
                    tp_cmatrix_fixed_centered[data_names.index(d)][int(Y_valid_filtered[np.where(P_valid_filtered == couple[0])[0][0]])][indx] += 1
    # Classify the false-positive detections with the quantized model.
    for f in fp_pos[data_names.index(d)]:
        if (f - wid >= 0) and (f + wid < sig_len):
            input = torch.from_numpy(sig[f-wid:f+wid])
            input.unsqueeze_(0)
            input.unsqueeze_(0)
            input.unsqueeze_(0)
            output = model_quantized(input.float())
            m = max(output[0])
            indx = list(output[0]).index(m)
            fp_vector[data_names.index(d)].append(indx)
    # Pretty-print the quantized-model confusion matrix with per-row accuracy.
    print(f'\n\nfile: {d}')
    d_sum = 0
    i_sum = 0
    j_sum = 0
    for i in range(len(tp_cmatrix_fixed[data_names.index(d)])):
        for j in range(len(tp_cmatrix_fixed[data_names.index(d)][i])):
            print(f'{tp_cmatrix_fixed[data_names.index(d)][i][j]:04d} ',end='')
            j_sum += tp_cmatrix_fixed[data_names.index(d)][i][j]
        d_sum += tp_cmatrix_fixed[data_names.index(d)][i][i]
        i_sum += j_sum
        if j_sum:
            print(f'{tp_cmatrix_fixed[data_names.index(d)][i][i]/j_sum}')
        else:
            print('-')
        j_sum = 0
    if i_sum:
        print(f'total accuracy: {d_sum/i_sum}\n')
    else:
        print(f'-\n')
    # Same summary for the centered-window variant.
    print(f'file: {d} (centered)')
    d_sum = 0
    i_sum = 0
    j_sum = 0
    for i in range(len(tp_cmatrix_fixed_centered[data_names.index(d)])):
        for j in range(len(tp_cmatrix_fixed_centered[data_names.index(d)][i])):
            print(f'{tp_cmatrix_fixed_centered[data_names.index(d)][i][j]:04d} ',end='')
            j_sum += tp_cmatrix_fixed_centered[data_names.index(d)][i][j]
        d_sum += tp_cmatrix_fixed_centered[data_names.index(d)][i][i]
        i_sum += j_sum
        if j_sum:
            print(f'{tp_cmatrix_fixed_centered[data_names.index(d)][i][i]/j_sum}')
        else:
            print('-')
        j_sum = 0
    if i_sum:
        print(f'total accuracy: {d_sum/i_sum}\n')
    else:
        print(f'-\n')
# Count how many false-positive windows the quantized model labels as class 0;
# cnt_t is the total number of classified false positives.
print(fp_vector)
cnt_t = 0
cnt_n = 0
for fp in fp_vector:
    for f in fp:
        if f == 0:
            cnt_n += 1
        cnt_t += 1
print(cnt_t)
print(cnt_n)
print('\n')
# Spinner while the confusion matrices are pickled to the session directory.
t_done = False
t_dict = {'prefix' : f'{color.NONE}Exporting data{color.END}: '}
t = threading.Thread(target=animate, kwargs=t_dict)
t.start()
with open(session_path+'tp_cmatrix_fixed.pickle', 'wb') as output_file:
    pk.dump(tp_cmatrix_fixed, output_file)
with open(session_path+'tp_cmatrix_fixed_centered.pickle', 'wb') as output_file:
    pk.dump(tp_cmatrix_fixed_centered, output_file)
with open(session_path+'tp_cmatrix_float.pickle', 'wb') as output_file:
    pk.dump(tp_cmatrix_float, output_file)
with open(session_path+'tp_cmatrix_float_centered.pickle', 'wb') as output_file:
    pk.dump(tp_cmatrix_float_centered, output_file)
# Summary JSON linking this evaluation back to its source sessions.
training_parameters = {
    'session_name': session_name,
    'session_train_path' : session_train_path,
    'session_peak_path' : session_peak_path
}
with open(session_path+'evaluation_summary.json', 'w') as json_file:
    json.dump(training_parameters, json_file, indent=2)
t_done = True
time.sleep(0.2)
print(f'{color.BOLD}\n\n\nEnding {color.NONE}evaluation{color.END}{color.BOLD} session \'{session_name}\'{color.END}')
# Aggregate the centered quantized confusion matrices over all records.
matrix = np.zeros((5,5))
for cmat in tp_cmatrix_fixed_centered:
    matrix = np.add(matrix,np.array(cmat))
# NOTE(review): the trailing comma makes this a 1-tuple expression (harmless).
np.set_printoptions(suppress=True),
print(matrix)
| 31,326 | 30.901222 | 147 | py |
neural-tangents | neural-tangents-main/setup.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup the package with pip."""
import os
import setuptools
# https://packaging.python.org/guides/making-a-pypi-friendly-readme/
# README.md becomes the PyPI long description.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
  long_description = f.read()
# Runtime dependencies installed with the package.
INSTALL_REQUIRES = [
    'jax>=0.4.6',
    'frozendict>=2.3',
    'typing_extensions>=4.0.1',
    'tf2jax>=0.3.3',
]
# Extras needed only for the test suite (`pip install neural-tangents[testing]`).
TESTS_REQUIRES = [
    'more-itertools',
    'tensorflow-datasets',
    'flax>=0.5.2',
]
def _get_version() -> str:
"""Returns the package version.
Adapted from:
https://github.com/deepmind/tf2jax/blob/41ea640c7525ed42f7dcd02937b1f308f8949521/setup.py#L24
Returns:
Version number.
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, 'neural_tangents', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not found in `neural_tangents/__init__.py`')
# Package metadata; see the setuptools keyword reference for each field.
setuptools.setup(
    name='neural-tangents',
    version=_get_version(),
    license='Apache 2.0',
    author='Neural Tangents',
    author_email='neural-tangents-dev@google.com',
    install_requires=INSTALL_REQUIRES,
    extras_require={
        'testing': TESTS_REQUIRES,
    },
    url='https://github.com/google/neural-tangents',
    download_url='https://pypi.org/project/neural-tangents/',
    project_urls={
        'Source Code':
            'https://github.com/google/neural-tangents',
        'Paper':
            'https://arxiv.org/abs/1912.02803',
        'Finite Width NTK paper':
            'https://arxiv.org/abs/2206.08720',
        'Video':
            'https://iclr.cc/virtual_2020/poster_SklD9yrFPS.html',
        'Finite Width NTK video':
            'https://youtu.be/8MWOhYg89fY?t=10984',
        'Documentation':
            'https://neural-tangents.readthedocs.io/en/latest/?badge=latest',
        'Bug Tracker':
            'https://github.com/google/neural-tangents/issues',
        'Release Notes':
            'https://github.com/google/neural-tangents/releases',
        'PyPi':
            'https://pypi.org/project/neural-tangents/',
        'Linux Tests':
            'https://github.com/google/neural-tangents/actions/workflows/linux.yml',
        'macOS Tests':
            'https://github.com/google/neural-tangents/actions/workflows/macos.yml',
        'Pytype':
            'https://github.com/google/neural-tangents/actions/workflows/pytype.yml',
        'Coverage':
            'https://app.codecov.io/gh/google/neural-tangents'
    },
    # Exclude slide decks etc. from the wheel.
    packages=setuptools.find_packages(exclude=('presentation',)),
    long_description=long_description,
    long_description_content_type='text/markdown',
    description='Fast and Easy Infinite Neural Networks in Python',
    python_requires='>=3.8',
    classifiers=[
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS',
        'Operating System :: POSIX :: Linux',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Development Status :: 4 - Beta',
    ])
| 4,462 | 31.34058 | 95 | py |
neural-tangents | neural-tangents-main/examples/empirical_ntk.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using different implementations for empirical NTK computation.
All implementations apply to any differentiable functions, (not necessarily ones
constructed with Neural Tangents).
For details about the empirical (finite width) NTK computation, please see
"`Fast Finite Width Neural Tangent Kernel <https://arxiv.org/abs/2206.08720>`_".
"""
from absl import app
import jax
from jax import numpy as np
from jax import random
import neural_tangents as nt
from neural_tangents import stax
def main(unused_argv):
  """Computes an empirical NTK with all four implementations and compares them.

  Builds a small CNN, evaluates its finite-width test-train NTK with every
  `nt.NtkImplementation`, and asserts all results agree up to a tolerance.
  """
  key1, key2, key3 = random.split(random.PRNGKey(1), 3)
  x1 = random.normal(key1, (2, 8, 8, 3))
  x2 = random.normal(key2, (3, 8, 8, 3))

  # A vanilla CNN.
  init_fn, f, _ = stax.serial(
      stax.Conv(8, (3, 3)),
      stax.Relu(),
      stax.Conv(8, (3, 3)),
      stax.Relu(),
      stax.Conv(8, (3, 3)),
      stax.Flatten(),
      stax.Dense(10)
  )

  _, params = init_fn(key3, x1.shape)

  kwargs = dict(
      f=f,
      trace_axes=(),
      vmap_axes=0,
  )

  implementations = (
      # Default, baseline Jacobian contraction.
      nt.NtkImplementation.JACOBIAN_CONTRACTION,
      # NTK-vector products-based implementation.
      nt.NtkImplementation.NTK_VECTOR_PRODUCTS,
      # Structured derivatives-based implementation.
      nt.NtkImplementation.STRUCTURED_DERIVATIVES,
      # Auto-FLOPs-selecting implementation. Doesn't work correctly on CPU/GPU.
      nt.NtkImplementation.AUTO,
  )

  # Full test-train NTK array from each implementation, in the order above.
  ntks = []
  for implementation in implementations:
    ntk_fn = nt.empirical_ntk_fn(**kwargs, implementation=implementation)
    ntks.append(ntk_fn(x2, x1, params))

  # Check that implementations match
  tolerance = 1e-4 if jax.default_backend() != 'tpu' else 0.1
  for lhs in ntks:
    for rhs in ntks:
      diff = np.max(np.abs(lhs - rhs))
      print(f'NTK implementation diff {diff}.')
      assert diff < tolerance, diff

  print('All NTK implementations match.')
# Script entry point: delegate flag parsing and execution to absl.
if __name__ == '__main__':
  app.run(main)
| 2,892 | 29.135417 | 80 | py |
neural-tangents | neural-tangents-main/examples/imdb.py | """An example doing inference with an infinitely wide attention network on IMDb.
Adapted from
https://github.com/google/neural-tangents/blob/main/examples/infinite_fcn.py
By default, this example does inference on a very small subset, and uses small
word embeddings for performance. A 300/300 train/test split takes 30 seconds
on a machine with 2 Titan X Pascal GPUs, please adjust settings accordingly.
For details, please see "`Infinite attention: NNGP and NTK for deep attention
networks <https://arxiv.org/abs/2006.10540>`_".
"""
import time
from typing import Tuple
from absl import app
from jax import random
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from examples import datasets
from examples import util
_TRAIN_SIZE = 300 # Dataset size to use for training.
_TEST_SIZE = 300 # Dataset size to use for testing.
_BATCH_SIZE = 15 # Batch size for kernel computation. 0 for no batching.
_MAX_SENTENCE_LENGTH = 500 # Pad/truncate sentences to this length.
_GLOVE_PATH = '/tmp/glove.6B.50d.txt' # Path to GloVe word embeddings.
_IMDB_PATH = '/tmp/imdb_reviews' # Path to imdb sentences.
def main(*args, use_dummy_data: bool = False, **kwargs) -> None:
  """Runs NNGP / NTK inference with an infinite attention network on IMDb.

  Loads (or synthesizes) GloVe-embedded review data, builds an infinite-width
  attention network, and reports test loss/accuracy for both the NNGP and the
  NTK predictors.

  Args:
    *args: unused (absl passes command-line arguments here).
    use_dummy_data: if `True`, use random data instead of downloading IMDb.
    **kwargs: unused.
  """
  # Mask all padding with this value.
  mask_constant = 100.

  if use_dummy_data:
    x_train, y_train, x_test, y_test = _get_dummy_data(mask_constant)
  else:
    # Build data pipelines.
    print('Loading IMDb data.')
    x_train, y_train, x_test, y_test = datasets.get_dataset(
        name='imdb_reviews',
        n_train=_TRAIN_SIZE,
        n_test=_TEST_SIZE,
        do_flatten_and_normalize=False,
        data_dir=_IMDB_PATH,
        input_key='text')

    # Embed words and pad / truncate sentences to a fixed size.
    x_train, x_test = datasets.embed_glove(
        xs=[x_train, x_test],
        glove_path=_GLOVE_PATH,
        max_sentence_length=_MAX_SENTENCE_LENGTH,
        mask_constant=mask_constant)

  # Build the infinite network. Not using the finite model, hence all widths
  # are irrelevant and set to 1.
  attention = stax.GlobalSelfAttention(
      n_chan_out=1,
      n_chan_key=1,
      n_chan_val=1,
      pos_emb_type='SUM',
      W_pos_emb_std=1.,
      pos_emb_decay_fn=lambda d: 1 / (1 + d**2),
      n_heads=1)

  _, _, kernel_fn = stax.serial(
      stax.Conv(out_chan=1, filter_shape=(9,), strides=(1,), padding='VALID'),
      stax.Relu(),
      attention,
      stax.Relu(),
      stax.GlobalAvgPool(),
      stax.Dense(out_dim=1)
  )

  # Optionally, compute the kernel in batches, in parallel.
  kernel_fn = nt.batch(kernel_fn, device_count=-1, batch_size=_BATCH_SIZE)

  start = time.time()

  # Bayesian and infinite-time gradient descent inference with infinite network.
  predict = nt.predict.gradient_descent_mse_ensemble(
      kernel_fn=kernel_fn,
      x_train=x_train,
      y_train=y_train,
      diag_reg=1e-6,
      mask_constant=mask_constant)
  fx_test_nngp, fx_test_ntk = predict(x_test=x_test, get=('nngp', 'ntk'))

  fx_test_nngp.block_until_ready()
  fx_test_ntk.block_until_ready()

  elapsed = time.time() - start
  print(f'Kernel construction and inference done in {elapsed} seconds.')

  # Print out accuracy and loss for infinite network predictions.
  def loss(fx, y_hat):
    # Mean squared error of the predictor outputs.
    return 0.5 * np.mean((fx - y_hat) ** 2)

  util.print_summary('NNGP test', y_test, fx_test_nngp, None, loss)
  util.print_summary('NTK test', y_test, fx_test_ntk, None, loss)
def _get_dummy_data(mask_constant: float
                    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
  """Return dummy data for when downloading embeddings is not feasible."""
  n_train, n_test = 6, 6

  def make_inputs(shape, key):
    # Random normals with ~60% of the entries replaced by the padding mask.
    key_values, key_pad = random.split(key)
    values = random.normal(key_values, shape)
    pad = random.bernoulli(key_pad, 0.6, shape)
    return np.where(pad, mask_constant, values)

  def make_targets(x):
    # Zero out padded entries before computing the synthetic labels.
    x = np.where(x == mask_constant, 0., x)

    def weighted_sum(x, start, end):
      # Position-weighted sum over a slice of the embedding channels.
      return np.sum(x[..., start:end] *
                    np.arange(x.shape[1])[None, ..., None],
                    axis=(1, 2))

    half = x.shape[-1] // 2
    labels = np.stack([weighted_sum(x, 0, half),
                       weighted_sum(x, half, x.shape[-1])],
                      axis=-1) > 0
    return np.where(labels, 0.5, -0.5)

  rng_train, rng_test = random.split(random.PRNGKey(1), 2)
  x_train = make_inputs((n_train, _MAX_SENTENCE_LENGTH, 50), rng_train)
  x_test = make_inputs((n_test, _MAX_SENTENCE_LENGTH, 50), rng_test)
  return x_train, make_targets(x_train), x_test, make_targets(x_test)
# Script entry point: delegate flag parsing and execution to absl.
if __name__ == '__main__':
  app.run(main)
| 4,628 | 32.543478 | 80 | py |
neural-tangents | neural-tangents-main/examples/elementwise_numerical.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of approximating the NNGP and NTK using quadrature and autodiff.
For details, see :obj:`~neural_tangents.stax.ElementwiseNumerical` and "`Fast
Neural Kernel Embeddings for General Activations
<https://arxiv.org/abs/2209.04121>`_".
"""
from absl import app
from jax import numpy as np
from jax import random
import jax.nn
from neural_tangents import stax
def main(unused_argv):
  """Checks the quadrature-based GeLU kernel against its closed form."""
  key1, key2 = random.split(random.PRNGKey(1))
  x1 = random.normal(key1, (10, 3))
  x2 = random.normal(key2, (20, 3))

  # GeLU is a nonlinearity whose NNGP/NTK closed-form expression is known...
  _, _, exact_kernel_fn = stax.serial(
      stax.Dense(1),
      stax.Gelu(),  # Contains the closed-form GeLU NNGP/NTK expression.
      stax.Dense(1)
  )
  exact = exact_kernel_fn(x1, x2)

  # ...and it can also be handled generically, given only the elementwise
  # forward-pass function, via Gaussian quadrature and autodiff.
  _, _, numerical_kernel_fn = stax.serial(
      stax.Dense(1),
      stax.ElementwiseNumerical(jax.nn.gelu, deg=25),
      stax.Dense(1)
  )
  approx = numerical_kernel_fn(x1, x2)

  # The two kernels are close!
  assert np.max(np.abs(exact.nngp - approx.nngp)) < 1e-3
  assert np.max(np.abs(exact.ntk - approx.ntk)) < 1e-3
  print('Gaussian quadrature approximation of the kernel is accurate!')
# Script entry point: delegate flag parsing and execution to absl.
if __name__ == '__main__':
  app.run(main)
| 2,016 | 32.616667 | 80 | py |
neural-tangents | neural-tangents-main/examples/datasets.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets used in examples."""
import gzip
import os
import shutil
import urllib.request
from jax import random
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def _partial_flatten_and_normalize(x):
"""Flatten all but the first dimension of an `np.ndarray`."""
x = np.reshape(x, (x.shape[0], -1))
return (x - np.mean(x)) / np.std(x)
def _one_hot(x, k, dtype=np.float32):
"""Create a one-hot encoding of x of size k."""
return np.array(x[:, None] == np.arange(k), dtype)
def get_dataset(name,
                n_train=None,
                n_test=None,
                permute_train=False,
                do_flatten_and_normalize=True,
                data_dir=None,
                input_key='image'):
  """Download, parse and process a dataset to unit scale and one-hot labels."""
  # Need this following http://cl/378185881 to prevent GPU test breakages.
  tf.config.set_visible_devices([], 'GPU')
  ds_builder = tfds.builder(name)

  # Pin TFDS version 3 for everything except IMDb; optionally truncate splits.
  version_suffix = ':3.*.*' if name != 'imdb_reviews' else ''
  train_split = 'train' + ('[:%d]' % n_train if n_train is not None else '')
  test_split = 'test' + ('[:%d]' % n_test if n_test is not None else '')

  ds_train, ds_test = tfds.as_numpy(
      tfds.load(
          name + version_suffix,
          split=[train_split, test_split],
          batch_size=-1,
          as_dataset_kwargs={'shuffle_files': False},
          data_dir=data_dir))

  train_images = ds_train[input_key]
  train_labels = ds_train['label']
  test_images = ds_test[input_key]
  test_labels = ds_test['label']

  if do_flatten_and_normalize:
    train_images = _partial_flatten_and_normalize(train_images)
    test_images = _partial_flatten_and_normalize(test_images)

  num_classes = ds_builder.info.features['label'].num_classes
  train_labels = _one_hot(train_labels, num_classes)
  test_labels = _one_hot(test_labels, num_classes)

  if permute_train:
    # Fixed-seed shuffle of the training split for reproducibility.
    perm = np.random.RandomState(0).permutation(train_images.shape[0])
    train_images = train_images[perm]
    train_labels = train_labels[perm]

  return train_images, train_labels, test_images, test_labels
def minibatch(x_train, y_train, batch_size, train_epochs):
  """Generate minibatches of data for a set number of epochs.

  Yields `(x_batch, y_batch)` slices of size `batch_size`. When fewer than
  `batch_size` samples remain, the epoch ends: both arrays are reshuffled,
  the trailing partial batch is dropped, and iteration restarts.
  """
  epoch = 0
  start = 0
  key = random.PRNGKey(0)

  while epoch < train_epochs:
    end = start + batch_size
    if end > x_train.shape[0]:
      # Epoch boundary: reshuffle both arrays with a fresh subkey.
      key, subkey = random.split(key)
      order = random.permutation(
          subkey,
          np.arange(x_train.shape[0], dtype=np.int64),
          independent=True
      )
      x_train = x_train[order]
      y_train = y_train[order]
      epoch += 1
      start = 0
      continue

    yield x_train[start:end], y_train[start:end]
    start = end
def embed_glove(xs, glove_path, max_sentence_length=1000, mask_constant=1000.):
  """Embed a list of string arrays into GloVe word embeddings.

  Adapted from https://keras.io/examples/pretrained_word_embeddings/.

  Args:
    xs: list of string numpy arrays to embed.
    glove_path: path to the GloVe embedding file.
    max_sentence_length: pad/truncate embeddings to this length.
    mask_constant: mask padding with this constant.

  Returns:
    xs with words replaced by word embeddings, padded/truncated to a fixed
    length, with padding masked with the given constant.
  """
  xs = [_decode(x) for x in xs]

  # Fit one shared vocabulary over all splits so token ids are consistent.
  tokenizer = tf.keras.preprocessing.text.Tokenizer()
  tokenizer.fit_on_texts(np.concatenate(xs))

  glove_embedding_layer = _get_glove_embedding_layer(tokenizer,
                                                     glove_path,
                                                     max_sentence_length)

  def embed(sentences):
    # Strings -> sequences of integer tokens.
    token_seqs = tokenizer.texts_to_sequences(sentences)
    seq_lengths = np.array([len(seq) for seq in token_seqs])
    # Pad / truncate all sentences to a fixed max sentence length.
    token_seqs = tf.keras.preprocessing.sequence.pad_sequences(
        token_seqs,
        max_sentence_length,
        padding='post',
        truncating='post')
    # Integer tokens -> GloVe word embeddings.
    embedded = glove_embedding_layer(token_seqs).numpy()
    # Mask the padding positions.
    padding = np.arange(max_sentence_length)[None, :] >= seq_lengths[:, None]
    embedded[padding, ...] = mask_constant
    return embedded

  return map(embed, xs)
def _get_glove_embedding_layer(tokenizer, glove_path, max_sentence_length):
  """Get a Keras embedding layer for a given GloVe embeddings.

  Adapted from https://keras.io/examples/pretrained_word_embeddings/.
  Downloads and decompresses the 50-dimensional GloVe file on first use if it
  is not already present at `glove_path`.

  Args:
    tokenizer: the `keras.preprocessing.text.Tokenizer` used to tokenize inputs.
    glove_path: path to the GloVe embedding file.
    max_sentence_length: pad/truncate embeddings to this length.

  Returns:
    Keras embedding layer for a given GloVe embeddings.
  """
  embedding_dim = 50  # Dimensionality of the `glove.6B.50d` vectors.
  word_index = tokenizer.word_index
  print('Loading the embedding model')
  embeddings_index = {}
  # Fetch and unpack the embedding file if it is missing.
  if not os.path.exists(glove_path):
    if not os.path.exists(f'{glove_path}.gz'):
      print(f'Did not find {glove_path} word embeddings, downloading...')
      url = 'https://github.com/icml2020-attention/glove/raw/main/glove.6B.50d.txt.gz'
      urllib.request.urlretrieve(url, f'{glove_path}.gz')
    with gzip.open(f'{glove_path}.gz', 'rt') as f_in:
      with open(glove_path, 'wt') as f_out:
        shutil.copyfileobj(f_in, f_out)
  # Each line of the file is `<word> <coef_1> ... <coef_50>`.
  with open(glove_path) as f:
    for line in f:
      word, coefs = line.split(sep=' ', maxsplit=1)
      coefs = np.fromstring(coefs, 'f', sep=' ')
      embeddings_index[word] = coefs
  print(f'Found {len(embeddings_index)} word vectors.')
  print(f'Found {len(word_index)} unique tokens.')
  # Row `i` holds the embedding of token id `i`; `+1` so the largest token id
  # fits (row 0 is never assigned below and stays all-zero).
  num_words = len(word_index) + 1
  emb_mat = np.zeros((num_words, embedding_dim))
  for word, i in word_index.items():
    emb_vector = embeddings_index.get(word)
    if emb_vector is not None:
      # words not found in embedding index will be all-zeros.
      emb_mat[i] = emb_vector
  # Frozen (non-trainable) lookup layer initialized with the GloVe matrix.
  embedding_layer = tf.keras.layers.Embedding(
      num_words, embedding_dim,
      embeddings_initializer=tf.keras.initializers.Constant(emb_mat),
      input_length=max_sentence_length,
      trainable=False)
  return embedding_layer
def _decode(x):
return np.array([s.decode() for s in x])
| 6,991 | 32.454545 | 86 | py |
neural-tangents | neural-tangents-main/examples/util.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of utility operations for running examples.
"""
import jax.numpy as np
def _accuracy(y, y_hat):
"""Compute the accuracy of the predictions with respect to one-hot labels."""
return np.mean(np.argmax(y, axis=1) == np.argmax(y_hat, axis=1))
def print_summary(name, labels, net_p, lin_p, loss):
  """Print accuracy/loss of a network and (optionally) its linearization.

  Args:
    name: dataset split name used in the printout.
    labels: one-hot ground-truth labels.
    net_p: network predictions.
    lin_p: linearized-network predictions, or `None` to skip the comparison.
    loss: callable `(predictions, labels) -> scalar loss`.
  """
  print(f'\nEvaluating Network on {name} data.')
  print('---------------------------------------')
  print(f'Network Accuracy = {_accuracy(net_p, labels)}')
  print(f'Network Loss = {loss(net_p, labels)}')
  if lin_p is not None:
    print(f'Linearization Accuracy = {_accuracy(lin_p, labels)}')
    print(f'Linearization Loss = {loss(lin_p, labels)}')
    rmse = np.sqrt(np.mean((net_p - lin_p) ** 2))
    print(f'RMSE of predictions: {rmse}')
  print('---------------------------------------')
| 1,503 | 37.564103 | 79 | py |
neural-tangents | neural-tangents-main/examples/infinite_fcn.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example doing inference with an infinitely wide fully-connected network.
By default, this example does inference on a small CIFAR10 subset.
"""
import time
from absl import app
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from examples import datasets
from examples import util
_TRAIN_SIZE = 1000 # Dataset size to use for training.
_TEST_SIZE = 1000 # Dataset size to use for testing.
_BATCH_SIZE = 0 # Batch size for kernel computation. 0 for no batching.
def main(unused_argv):
  """Runs NNGP/NTK inference with an infinite fully-connected network."""
  # Build data pipelines.
  print('Loading data.')
  x_train, y_train, x_test, y_test = datasets.get_dataset(
      'cifar10', _TRAIN_SIZE, _TEST_SIZE)

  # Build the infinite network: Dense -> ReLU -> Dense.
  _, _, kernel_fn = stax.serial(
      stax.Dense(1, 2., 0.05),
      stax.Relu(),
      stax.Dense(1, 2., 0.05)
  )

  # Optionally, compute the kernel in batches, in parallel.
  kernel_fn = nt.batch(kernel_fn, device_count=0, batch_size=_BATCH_SIZE)

  start_time = time.time()
  # Bayesian and infinite-time gradient descent inference with the infinite
  # network.
  predict_fn = nt.predict.gradient_descent_mse_ensemble(
      kernel_fn, x_train, y_train, diag_reg=1e-3)
  fx_test_nngp, fx_test_ntk = predict_fn(x_test=x_test)

  fx_test_nngp.block_until_ready()
  fx_test_ntk.block_until_ready()

  elapsed = time.time() - start_time
  print(f'Kernel construction and inference done in {elapsed} seconds.')

  # Print out accuracy and loss for infinite network predictions.
  mse_loss = lambda fx, y_hat: 0.5 * np.mean((fx - y_hat) ** 2)
  util.print_summary('NNGP test', y_test, fx_test_nngp, None, mse_loss)
  util.print_summary('NTK test', y_test, fx_test_ntk, None, mse_loss)
if __name__ == '__main__':
app.run(main)
| 2,505 | 33.805556 | 80 | py |
neural-tangents | neural-tangents-main/examples/function_space.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example comparing training a neural network with the NTK dynamics.
In this example, we train a neural network on a small subset of MNIST using an
MSE loss and SGD. We compare this training with the analytic function space
prediction using the NTK. Data is loaded using tensorflow datasets.
"""
from absl import app
from jax import grad
from jax import jit
from jax import random
from jax.example_libraries import optimizers
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from examples import datasets
from examples import util
_LEARNING_RATE = 1.0 # Learning rate to use during training.
_TRAIN_SIZE = 128 # Dataset size to use for training.
_TEST_SIZE = 128 # Dataset size to use for testing.
_TRAIN_TIME = 1000.0 # Continuous time denoting duration of training.
def main(unused_argv):
  """Trains an MLP on MNIST and compares it with analytic NTK inference."""
  # Build data pipelines.
  print('Loading data.')
  x_train, y_train, x_test, y_test = datasets.get_dataset('mnist',
                                                          _TRAIN_SIZE,
                                                          _TEST_SIZE)
  # Build the network
  init_fn, apply_fn, _ = stax.serial(
      stax.Dense(512, 1., 0.05),
      stax.Erf(),
      stax.Dense(10, 1., 0.05))
  key = random.PRNGKey(0)
  _, params = init_fn(key, (-1, 784))
  # Create and initialize an optimizer.
  opt_init, opt_apply, get_params = optimizers.sgd(_LEARNING_RATE)
  state = opt_init(params)
  # Create an mse loss function and a gradient function.
  loss = lambda fx, y_hat: 0.5 * np.mean((fx - y_hat) ** 2)
  grad_loss = jit(grad(lambda params, x, y: loss(apply_fn(params, x), y)))
  # Create an MSE predictor to solve the NTK equation in function space.
  # `g_dd` is the train-train kernel, `g_td` the test-train kernel.
  ntk = nt.batch(nt.empirical_ntk_fn(apply_fn, vmap_axes=0),
                 batch_size=64, device_count=0)
  g_dd = ntk(x_train, None, params)
  g_td = ntk(x_test, x_train, params)
  predictor = nt.predict.gradient_descent_mse(g_dd, y_train)
  # Get initial values of the network in function space.
  fx_train = apply_fn(params, x_train)
  fx_test = apply_fn(params, x_test)
  # Train the network.
  train_steps = int(_TRAIN_TIME // _LEARNING_RATE)
  print('Training for {} steps'.format(train_steps))
  for i in range(train_steps):
    params = get_params(state)
    state = opt_apply(i, grad_loss(params, x_train, y_train), state)
  # Get predictions from analytic computation.
  print('Computing analytic prediction.')
  # Evolve the initial function-space outputs to continuous time _TRAIN_TIME.
  fx_train, fx_test = predictor(_TRAIN_TIME, fx_train, fx_test, g_td)
  # Print out summary data comparing the linear / nonlinear model.
  util.print_summary('train', y_train, apply_fn(params, x_train), fx_train,
                     loss)
  util.print_summary('test', y_test, apply_fn(params, x_test), fx_test,
                     loss)
if __name__ == '__main__':
app.run(main)
| 3,399 | 34.789474 | 78 | py |
neural-tangents | neural-tangents-main/examples/elementwise.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of automatically deriving the closed-form NTK from NNGP.
For details, see :obj:`~neural_tangents.stax.Elementwise` and "`Fast Neural
Kernel Embeddings for General Activations <https://arxiv.org/abs/2209.04121>`_".
"""
from absl import app
from jax import numpy as np
from jax import random
from neural_tangents import stax
def main(unused_argv):
  """Shows that the autodiff-derived NTK matches the hand-derived NTK."""
  # Consider the normalized exponential kernel from
  # https://arxiv.org/abs/2003.02237 (page 6).
  def nngp_fn(cov12, var1, var2):
    scale = np.sqrt(var1 * var2)
    return scale * np.exp(cov12 / scale - 1)

  # No closed-form elementwise nonlinearity is known for this kernel.
  # `stax.Elementwise` derives the NTK from `nngp_fn` under the hood using
  # automatic differentiation, so neither the nonlinearity nor hand-computed
  # integrals are needed.
  _, _, kernel_fn = stax.serial(stax.Dense(1),
                                stax.Elementwise(nngp_fn=nngp_fn))

  # Reference kernel built from the manually-derived NTK expression.
  _, _, kernel_fn_manual = stax.serial(stax.Dense(1),
                                       stax.ExpNormalized())

  key = random.PRNGKey(1)
  x1 = random.normal(key, (10, 2))
  x2 = random.normal(key, (20, 2))

  k_auto = kernel_fn(x1, x2, 'ntk')
  k_manual = kernel_fn_manual(x1, x2, 'ntk')

  # The two kernels match!
  assert np.max(np.abs(k_manual - k_auto)) < 1e-6
  print('NTK derived via autodiff matches the hand-derived NTK!')
if __name__ == '__main__':
app.run(main)
| 2,095 | 34.525424 | 80 | py |
neural-tangents | neural-tangents-main/examples/weight_space.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example comparing training a neural network with its linearization.
In this example we train a neural network and a linear model corresponding to
the first order Taylor seres of the network about its initial parameters. The
network is a fully-connected network with one hidden layer. We use momentum and
minibatching on the full MNIST dataset. Data is loaded using tensorflow.
datasets.
"""
from absl import app
from jax import grad
from jax import jit
from jax import random
from jax.example_libraries import optimizers
from jax.nn import log_softmax
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from examples import datasets
from examples import util
_LEARNING_RATE = 1.0 # Learning rate to use during training.
_BATCH_SIZE = 128 # Batch size to use during training.
_TRAIN_EPOCHS = 10 # Number of epochs to train for.
def main(unused_argv):
  """Trains a network and its linearization on MNIST and compares them."""
  # Load data and preprocess it.
  print('Loading data.')
  x_train, y_train, x_test, y_test = datasets.get_dataset('mnist',
                                                          permute_train=True)
  # Build the network
  init_fn, f, _ = stax.serial(
      stax.Dense(512, 1., 0.05),
      stax.Erf(),
      stax.Dense(10, 1., 0.05))
  key = random.PRNGKey(0)
  _, params = init_fn(key, (-1, 784))
  # Linearize the network about its initial parameters.
  f_lin = nt.linearize(f, params)
  # Create and initialize an optimizer for both f and f_lin.
  opt_init, opt_apply, get_params = optimizers.momentum(_LEARNING_RATE, 0.9)
  opt_apply = jit(opt_apply)
  # Separate optimizer states so the two models evolve independently from the
  # same initialization.
  state = opt_init(params)
  state_lin = opt_init(params)
  # Create a cross-entropy loss function.
  loss = lambda fx, y_hat: -np.mean(log_softmax(fx) * y_hat)
  # Specialize the loss function to compute gradients for both linearized and
  # full networks.
  grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y)))
  grad_loss_lin = jit(grad(lambda params, x, y: loss(f_lin(params, x), y)))
  # Train the network.
  print('Training.')
  print('Epoch\tLoss\tLinearized Loss')
  print('------------------------------------------')
  epoch = 0
  steps_per_epoch = 50000 // _BATCH_SIZE
  for i, (x, y) in enumerate(datasets.minibatch(
      x_train, y_train, _BATCH_SIZE, _TRAIN_EPOCHS)):
    # One optimizer step for the full network and one for the linearization.
    params = get_params(state)
    state = opt_apply(i, grad_loss(params, x, y), state)
    params_lin = get_params(state_lin)
    state_lin = opt_apply(i, grad_loss_lin(params_lin, x, y), state_lin)
    if i % steps_per_epoch == 0:
      # Report losses on the current batch once per epoch.
      print('{}\t{:.4f}\t{:.4f}'.format(
          epoch, loss(f(params, x), y), loss(f_lin(params_lin, x), y)))
      epoch += 1
  # Print out summary data comparing the linear / nonlinear model.
  x, y = x_train[:10000], y_train[:10000]
  util.print_summary('train', y, f(params, x), f_lin(params_lin, x), loss)
  util.print_summary(
      'test', y_test, f(params, x_test), f_lin(params_lin, x_test), loss)
if __name__ == '__main__':
app.run(main)
| 3,533 | 32.339623 | 79 | py |
neural-tangents | neural-tangents-main/examples/experimental/empirical_ntk_tf.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal highly-experimental Tensorflow NTK example.
Specifically, Tensorflow NTK appears to have very long compile times (but OK
runtime), is prone to triggering XLA errors, and does not distinguish between
trainable and non-trainable parameters of the model.
For details about the empirical (finite width) NTK computation, please see
"`Fast Finite Width Neural Tangent Kernel <https://arxiv.org/abs/2206.08720>`_".
"""
from absl import app
import neural_tangents as nt
import tensorflow as tf
tf.random.set_seed(1)
def _get_ntks(f, x1, x2, params, vmap_axes):
  """Returns a list of NTKs computed using different implementations.

  Each entry is the full test-train NTK of `f` as a dense array (e.g. of
  shape (6, 3, 10, 10) for the inputs used in `main`).
  """
  shared_kwargs = dict(
      f=f,
      trace_axes=(),
      vmap_axes=vmap_axes,
  )

  implementations = [
      # Default, baseline Jacobian contraction.
      nt.NtkImplementation.JACOBIAN_CONTRACTION,
      # NTK-vector products-based implementation.
      nt.NtkImplementation.NTK_VECTOR_PRODUCTS,
      # Structured derivatives-based implementation.
      nt.NtkImplementation.STRUCTURED_DERIVATIVES,
      # Auto-FLOPs-selecting implementation. Doesn't work correctly on
      # CPU/GPU.
      nt.NtkImplementation.AUTO,
  ]

  ntks = []
  for implementation in implementations:
    ntk_fn = nt.experimental.empirical_ntk_fn_tf(
        **shared_kwargs, implementation=implementation)
    ntks.append(ntk_fn(x2, x1, params))
  return ntks
def _check_ntks(ntks):
  """Asserts that every pair of NTKs in `ntks` agrees within tolerance."""
  for first in ntks:
    for second in ntks:
      max_abs_diff = tf.reduce_max(tf.abs(first - second))
      print(f'NTK implementation diff {max_abs_diff}.')
      assert max_abs_diff < 1e-4, max_abs_diff

  print('All NTK implementations match.')
def _compute_and_check_ntks(f, x1, x2, params):
  """Computes NTKs with and without `vmap_axes=0`, and checks they agree."""
  all_ntks = _get_ntks(f, x1, x2, params, vmap_axes=None)
  all_ntks += _get_ntks(f, x1, x2, params, vmap_axes=0)
  _check_ntks(all_ntks)
def main(unused_argv):
  """Checks NTK implementations agree on a Keras CNN and a `tf.function`."""
  x1 = tf.random.normal((6, 8, 8, 3), seed=1)
  x2 = tf.random.normal((3, 8, 8, 3), seed=2)
  # A vanilla CNN `tf.keras.Model` example.
  print('A Keras CNN example.')
  f = tf.keras.Sequential()
  f.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu'))
  f.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu'))
  f.add(tf.keras.layers.Conv2D(16, (3, 3)))
  f.add(tf.keras.layers.Flatten())
  f.add(tf.keras.layers.Dense(10))
  f.build((None, *x1.shape[1:]))
  _, params = nt.experimental.get_apply_fn_and_params(f)
  _compute_and_check_ntks(f, x1, x2, params)
  # A `tf.function` example.
  print('A `tf.function` example.')
  params_tf = tf.random.normal((1, 2, 3, 4), seed=3)
  @tf.function(input_signature=[tf.TensorSpec(None),
                                tf.TensorSpec((None, *x1.shape[1:]))])
  def f_tf(params, x):
    # NHWC -> NCHW transpose scaled by the mean squared parameter, plus 1.
    return tf.transpose(x, (0, 3, 1, 2)) * tf.reduce_mean(params**2) + 1.
  _compute_and_check_ntks(f_tf, x1, x2, params_tf)
if __name__ == '__main__':
app.run(main)
| 3,838 | 30.991667 | 80 | py |
neural-tangents | neural-tangents-main/tests/empirical_ntk_test.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/empirical_ntk.py`."""
from absl.testing import absltest
from jax.config import config
from examples import empirical_ntk
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class EmpiricalNtkTest(test_utils.NeuralTangentsTestCase):
  """Smoke test for the `examples/empirical_ntk.py` example script."""

  def test_empirical_ntk(self):
    # The example's `main` ignores its `argv` argument, so `None` suffices.
    empirical_ntk.main(None)
if __name__ == '__main__':
absltest.main()
| 1,004 | 27.714286 | 74 | py |
neural-tangents | neural-tangents-main/tests/elementwise_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/elementwise.py`."""
from absl.testing import absltest
from jax.config import config
from examples import elementwise
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class ElementwiseTest(test_utils.NeuralTangentsTestCase):
  """Smoke test for the `examples/elementwise.py` example script."""

  def test_elementwise(self):
    # The example's `main` ignores its `argv` argument, so `None` suffices.
    elementwise.main(None)
if __name__ == '__main__':
absltest.main()
| 996 | 26.694444 | 74 | py |
neural-tangents | neural-tangents-main/tests/rules_test.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/utils/rules.py`."""
import itertools
import logging
import math
import random
from typing import Optional, Sequence, Tuple
import warnings

from absl.testing import absltest
import jax
from jax import lax
from jax.config import config
from jax.core import Primitive
from jax.core import ShapedArray
from jax.interpreters import ad
import jax.numpy as np
import more_itertools
import numpy as onp

from neural_tangents._src.utils import rules
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')

# Fix the global seed: `_conv_dim_nums` below subsamples via `random.sample`.
random.seed(1)

# `lax` precision settings to test with. Higher precisions are commented out
# to keep the number of test cases manageable.
_PRECISIONS = [
    None,
    # lax.Precision.HIGHEST,
    # lax.Precision.HIGH,
    # lax.Precision.DEFAULT
]

# Dtypes to test with; `float64` is included only when 64-bit mode is enabled.
_DTYPES = [
    # np.bfloat16,
    # np.float16,
    np.float32
] + ([np.float64] if jax.dtypes.canonicalize_dtype(np.float64) == np.float64
     else [])

# Input shapes to test with, including rank-0 and zero-sized dimensions.
_SHAPES = [
    (),
    # (0,),
    (2,),
    (0, 0),
    (1, 0),
    (0, 1),
    # (1, 1),
    # (2, 1),
    (1, 2),
    (2, 3),
    # (3, 2),
    # (0, 1, 0),
    (1, 2, 3),
    (6, 3, 2),
    # (2, 1, 1),
    # (3, 2, 1),
    (1, 2, 1, 3),
    # (2, 2, 2, 2),
    (2, 1, 3, 4),
    # (1, 2, 4, 3, 2),
    # (2, 2, 2, 1, 3)
]
def _hypercube(ndim: int, start: int = 1, end: int = 3):
end = (end,) * ndim
start = (start,) * ndim
return tuple(itertools.product(
*[tuple(range(s, e)) for s, e in zip(start, end)]
))
def _prod(x: Sequence[int]) -> int:
out = 1
for i in x:
out *= i
return out
def _is_broadcastable(s1, s2) -> bool:
if not (len(s1) == 0 or len(s2) == 0 or len(s1) == len(s2)):
return False
for a, b in zip(s1, s2):
if not (a == 1 or b == 1 or a == b):
return False
return True
def _dot_dim_nums(s1, s2):
  """Yields `lax.dot_general` dimension numbers for shapes `s1` and `s2`.

  Collects all axis pairs `(i, j)` with matching sizes, then recursively
  assigns each pair to be contracting, batch, or unused. The same dimension
  numbers may be yielded more than once; callers deduplicate via `set`.
  """
  # All axis pairs that could be contracted or batched over.
  pairs = []
  for i in range(len(s1)):
    for j in range(len(s2)):
      if s1[i] == s2[j]:
        pairs += [(i, j)]

  def get_dn(pairs, dn):
    # Leaf of the recursion: yield the accumulated dimension numbers.
    if len(pairs) == 0:
      yield dn

    for p in pairs:
      # Remaining candidates must not reuse either axis of `p`.
      new_pairs = [_p for _p in pairs if (_p[0] != p[0] and _p[1] != p[1])]
      # Option 1: leave `p` unassigned.
      yield from get_dn(new_pairs, dn)

      # Option 2: make `p` a contracting-dimensions pair.
      dn_c = (
          (dn[0][0] + (p[0],), dn[0][1] + (p[1],)),
          dn[1]
      )
      yield from get_dn(new_pairs, dn_c)

      # Option 3: make `p` a batch-dimensions pair.
      dn_b = (
          dn[0],
          (dn[1][0] + (p[0],), dn[1][1] + (p[1],)),
      )
      yield from get_dn(new_pairs, dn_b)

  # Start from empty ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)).
  yield from get_dn(pairs, (((), ()), ((), ())))
def _conv_dim_nums(n: int, s2: Tuple[int, ...]):
dims = itertools.permutations(range(n))
dns = []
for i in itertools.product(dims, repeat=3):
dn = lax.ConvDimensionNumbers(*i)
if all(s2[s] != 0 for s in dn[1][2:]):
dns += [dn]
return random.sample(dns, min(50, len(dns)))
def _paddings(n: int):
pads = [
# (0, 0),
(0, 1),
(1, 0)
]
return list(itertools.product(pads, repeat=n))
def _strides(n: int, strides=(1, 2)):
return list(itertools.product(strides, repeat=n))
def _feature_group_counts(lhs_in: int, rhs_out: int = None, *args, **kwargs):
  """Placeholder docstring replaced below."""
def _batch_group_counts(lhs_in: int, rhs_out: int, feature_group_count: int):
batch_group_counts = []
if feature_group_count == 1:
for i in range(1, lhs_in + 1):
if lhs_in % i == 0 and rhs_out % i == 0:
batch_group_counts += [i]
return batch_group_counts
def _get_inputs(shapes, dtype):
n = len(shapes)
keys = jax.random.split(jax.random.PRNGKey(1), n)
return [jax.random.normal(k, s, dtype) for k, s in zip(keys, shapes)]
def _get_invals(idx, *xs):
return [ShapedArray(x.shape, x.dtype) if idx == i else
x for i, x in enumerate(xs)]
def _get_f_and_eqn(params, primitive, *inputs):
if primitive is None:
f = lambda x: x
eqn = None
else:
if primitive is lax.pad_p:
# TODO(romann): find a way to call primitive.bind directly.
f = lambda *inputs: lax.pad(*inputs, **params)
elif primitive is lax.conv_general_dilated_p:
# TODO(romann): find a way to call primitive.bind directly.
f = lambda *inputs: lax.conv_general_dilated(*inputs, **params)
else:
f = lambda *inputs: primitive.bind(*inputs, **params)
eqn = jax.make_jaxpr(f)(*inputs).eqns[0]
return eqn, f
def _concat_dims(*shapes):
dims = []
if len(shapes) == 0:
return dims
s0 = shapes[0]
n = len(s0)
if any(len(s) != n for s in shapes):
return dims
for i in range(n):
if all(s[j] == s0[j] for s in shapes for j in range(n) if j != i):
dims += [i]
return dims
def _concat_shapes(max_n_args: int = 4, *shapes):
sets = []
if len(shapes) == 0:
return sets
bins = {}
for s in shapes:
n = len(s)
if n in bins:
bins[n] += [s]
else:
bins[n] = [s]
for n in bins:
for n_args in range(max_n_args):
sets += list(itertools.combinations_with_replacement(bins[n], n_args))
return sets
# Maps each unary primitive to a function `(shape, dtype) -> [params, ...]`
# enumerating the parameter dictionaries to test that primitive with. The
# `None` key stands for the identity function (no primitive applied).
_UNARY_PRIMITIVES = {
    None: lambda s, _: [{}],
    jax.lax.copy_p: lambda s, _: [{}],
    ad.zeros_like_p: lambda s, _: [{}],
    lax.neg_p: lambda s, _: [{}],
    lax.transpose_p: lambda s, _: [
        {'permutation': p}
        for p in itertools.permutations(range(len(s)))
    ],
    lax.reduce_sum_p: lambda s, _: [
        {'axes': p}
        for p in more_itertools.powerset(range(len(s)))
    ],
    lax.reduce_window_sum_p: lambda s, _: [
        {
            'base_dilation': b_d,
            'padding': p,
            'window_dilation': w_dl,
            'window_dimensions': w_dd,
            'window_strides': w_s
        } for b_d in _hypercube(len(s))[:3]
        for p in map(tuple, ([[]] if len(s) == 0 else [
            [(0, 0) for _ in range(len(s))],
            [(i, i // 2 + 1) for i in range(len(s))],
            [(i // 2 + 1, i) for i in range(len(s))],
        ])) for w_dl in _hypercube(len(s))[:3]
        for w_dd in _hypercube(len(s))[:3]
        for w_s in _hypercube(len(s))[:3]],
    lax.broadcast_in_dim_p:
        lambda s, _: [
            {
                'shape': sd,
                'broadcast_dimensions': bd
            } for sd, bd in [  # inserting 1 dimension
                (s[:i] + (3,) + s[i:], tuple(range(i)) + tuple(
                    range(i + 1,
                          len(s) + 1))) for i in range(len(s) + 1)
            ] + [  # inserting 2 dimensions
                (s[:i] + (3,) + s[i:j] + (4,) + s[j:], tuple(range(i)) + tuple(
                    range(i + 1, j + 1)) + tuple(range(j + 2,
                                                       len(s) + 2)))
                for i in range(len(s))
                for j in range(i,
                               len(s) + 1)
            ]
        ],
    lax.squeeze_p:
        lambda s, _: [{
            'dimensions': d
        } for d in more_itertools.powerset(
            [idx for idx, i in enumerate(s) if i == 1])],
    lax.convert_element_type_p:
        lambda s, dtype: [{
            'new_dtype': d,
            'weak_type': w
        } for d in set(
            jax.dtypes.canonicalize_dtype(t)
            for t in [np.bfloat16, np.float16, np.float32, np.float64]
            if (jax.dtypes.canonicalize_dtype(t) != jax.dtypes.
                canonicalize_dtype(dtype))) for w in [False] +
                          ([True] if d != np.bfloat16 else [])],
    lax.rev_p:
        lambda s, _: [{
            'dimensions': d
        } for d in more_itertools.powerset(range(len(s)))],
    lax.device_put_p:
        lambda s, _: [{}],  # Test cases generated elsewhere.
    lax.pad_p:
        lambda s, dtype: [{
            'padding_value': v,
            'padding_config': c
        }
                          for v in [onp.array(f, dtype) for f in [-0.1, 1.5]]
                          for c in map(tuple, ([[]] if len(s) == 0 else [[
                              (1, 0, k) for k in range(len(s))
                          ], [(k, 1, 0) for k in range(len(s))
                             ], [(3 - k, k // 2, 1) for k in range(len(s))]]))],
    lax.reshape_p:
        lambda s, _: [{
            'new_sizes': n_s,
            'dimensions': d
        }
                      for n_s in {
                          (_prod(s),),
                          (_prod(s), 1),
                          (1, _prod(s), 1),
                      } | ({(2, _prod(s) // 2, 1), (_prod(s) // 2, 1, 2)}
                           if _prod(s) % 2 == 0 else set()
                          ) | ({(1, _prod(s) // 3, 3), (3, _prod(s) // 3)}
                               if _prod(s) % 3 == 0 else set()) | ({
                                   (2, _prod(s) // 6, 3),
                                   (3, _prod(s) // 6, 2),
                                   (3, _prod(s) // 6, 2),
                                   (_prod(s) // 6, 2, 3),
                                   (2, 3, _prod(s) // 6),
                                   (3, 2, _prod(s) // 6),
                               } if _prod(s) % 6 == 0 else set())
                      for d in itertools.permutations(range(len(s)))]
}
# Maps each binary primitive to a function `(shape1, shape2) -> [params, ...]`
# enumerating the parameter dictionaries to test that primitive with. An empty
# list means the shape combination is not applicable to the primitive.
_BINARY_PRIMITIVES = {
    # TODO(romann): what is the purpose of this primitive?
    ad.add_jaxvals_p:
        lambda s1, s2: ([{}] if s1 == s2 else []),
    lax.mul_p:
        lambda s1, s2: ([{}] if _is_broadcastable(s1, s2) else []),
    lax.div_p:
        lambda s1, s2: ([{}] if _is_broadcastable(s1, s2) else []),
    lax.add_p:
        lambda s1, s2: ([{}] if _is_broadcastable(s1, s2) else []),
    lax.sub_p:
        lambda s1, s2: ([{}] if _is_broadcastable(s1, s2) else []),
    lax.dot_general_p:
        lambda s1, s2: [
            {
                'dimension_numbers': dn,
                'precision': precision,
                'preferred_element_type': dtype
            # `set` deduplicates dimension numbers yielded by `_dot_dim_nums`.
            } for dn in set(_dot_dim_nums(s1, s2))
            for precision in _PRECISIONS
            for dtype in [None]
        ],
    lax.conv_general_dilated_p:
        lambda s1, s2: [
            {
                'window_strides': window_strides,
                'padding': padding,
                'lhs_dilation': lhs_dilation,
                'rhs_dilation': rhs_dilation,
                'dimension_numbers': dn,
                'feature_group_count': feature_group_count,
                'batch_group_count': batch_group_count,
                'precision': precision,
                'preferred_element_type': dtype
            } for dn in _conv_dim_nums(len(s1), s2)
            for padding in _paddings(len(s1) - 2)
            for window_strides in _strides(len(s1) - 2)
            for feature_group_count in _feature_group_counts(
                lhs_in=s1[dn[0][1]], rhs_in=s2[dn[1][1]], rhs_out=s2[dn[1][0]])
            for batch_group_count in _batch_group_counts(
                s1[dn[0][0]], s2[dn[1][0]], feature_group_count)
            for lhs_dilation in _strides(len(s1) - 2)
            for rhs_dilation in _strides(len(s2) - 2)
            for precision in _PRECISIONS
            for dtype in [None]
        # Convolutions require equal ranks of at least 2 (batch + feature).
        ] if (
            len(s1) == len(s2) and
            len(s1) >= 2
        ) else [],
}
# Maps n-ary primitives to a function of operand shapes returning the
# parameter dictionaries to test with (one per valid concatenation axis).
_N_ARY_PRIMITIVES = {
    lax.concatenate_p: lambda *shapes: [{'dimension': d}
                                        for d in _concat_dims(*shapes)]
}
class JacobianRulesTest(test_utils.NeuralTangentsTestCase):
  def _assert_is_diagonal(self, j, axis1, axis2, constant_diagonal: bool):
    """Checks that `j` is diagonal in the `(axis1, axis2)` plane.

    If `constant_diagonal`, additionally checks that all diagonal entries in
    that plane are equal.
    """
    c = j.shape[axis1]
    self.assertEqual(c, j.shape[axis2])
    mask_shape = [c if i in (axis1, axis2) else 1 for i in range(j.ndim)]
    mask = np.eye(c, dtype=np.bool_).reshape(mask_shape)

    # Check that removing the diagonal makes the array all 0.
    j_masked = np.where(mask, np.zeros((), j.dtype), j)
    self.assertAllClose(np.zeros_like(j, j.dtype), j_masked)

    if constant_diagonal:
      # Check that diagonal is constant.
      if j.size != 0:
        j_diagonals = np.diagonal(j, axis1=axis1, axis2=axis2)
        self.assertAllClose(np.min(j_diagonals, -1), np.max(j_diagonals, -1))
def _assert_constant(self, j, axis):
if axis is not None:
j = np.moveaxis(j, axis, 0)
j = list(j)
for ji in j:
self.assertAllClose(j[0], ji)
  def _compare_jacobians(self, j_fwd, j_rev, j_rule, primitive):
    """Checks forward/reverse-mode Jacobians against the custom rule.

    `j_rule` may be `None` when no rule is defined for `primitive`, in which
    case only `j_fwd` vs `j_rev` is compared.
    """
    if primitive == lax.convert_element_type_p:
      # Check that only one of fwd/rev Jacobians matches the rule.
      # NOTE(review): presumably dtype-cast rounding makes fwd and rev
      # disagree with each other here — confirm.
      e_fwd, e_rev = None, None
      try:
        self.assertAllClose(j_fwd, j_rule)
      except Exception as e:
        logging.exception('Forward-mode Jacobian does not match the rule.')
        e_fwd = e

      try:
        self.assertAllClose(j_rev, j_rule)
      except Exception as e:
        logging.exception('Reverse-mode Jacobian does not match the rule.')
        e_rev = e

      # Fail only if neither mode matched the rule.
      if e_fwd is not None and e_rev is not None:
        raise ValueError(e_fwd, e_rev)

    else:
      if primitive == lax.reshape_p:
        # Reshape Jacobian is special-case defined as identity.
        j_rule: np.ndarray
        j_rule = j_rule.reshape(j_fwd.shape)

      self.assertAllClose(j_fwd, j_rev)
      if j_rule is not None:
        self.assertAllClose(j_fwd, j_rule)
        self.assertAllClose(j_rev, j_rule)
def _test_primitive(
self,
primitive: Optional[Primitive],
shapes,
dtype,
params
):
xs = _get_inputs(shapes, dtype)
n = len(xs)
eqn, f = _get_f_and_eqn(params, primitive, *xs)
out = f(*xs)
cts_in = ShapedArray(out.shape, out.dtype)
argnums = tuple(range(n))
js_fwd = jax.jacfwd(f, argnums)(*xs)
js_rev = jax.jacrev(f, argnums)(*xs)
for idx in range(n):
if primitive == lax.conv_general_dilated_p and idx == 0:
raise absltest.SkipTest('Jacobian of CNN wrt inputs not implemented.')
if primitive == lax.div_p and idx == 1:
raise absltest.SkipTest('Division is linear only in the first arg.')
invals = _get_invals(idx, *xs)
j_fwd, j_rev = js_fwd[idx], js_rev[idx]
if primitive in rules.JACOBIAN_RULES:
j_rule = rules.JACOBIAN_RULES[primitive](eqn, idx, invals, cts_in)
else:
warnings.warn(f'Jacobian rule for {primitive} at position {idx} not '
f'found.')
j_rule = None
with self.subTest(f'Jacobian ({idx})'):
self._compare_jacobians(j_fwd, j_rev, j_rule, primitive)
structure = rules.STRUCTURE_RULES[primitive](eqn, idx, invals, cts_in)
j = j_fwd if j_rule is None else j_rule
if primitive == lax.reshape_p:
out_ndim = xs[0].ndim
j = j.transpose(tuple(xs[0].ndim + i
for i in onp.argsort(structure.in_trace)) +
tuple(i for i in onp.argsort(structure.in_trace)))
j = j.reshape(
xs[0].shape +
tuple(xs[0].shape[i] for i in onp.argsort(structure.in_trace)))
else:
out_ndim = out.ndim
with self.subTest(f'Diagonal axes ({idx})'):
for i, o in zip(structure.in_diagonal, structure.out_diagonal):
self._assert_is_diagonal(
j=j,
axis1=out_ndim + i[idx],
axis2=o,
constant_diagonal=False)
with self.subTest(f'Constant diagonal axes ({idx})'):
for i, o in zip(structure.in_trace, structure.out_trace):
self._assert_is_diagonal(
j=j,
axis1=out_ndim + i,
axis2=o,
constant_diagonal=True)
with self.subTest(f'Input broadcast axes ({idx})'):
for i in structure.in_broadcast:
self._assert_constant(j=j, axis=i)
with self.subTest(f'Output broadcast axes ({idx})'):
for i in structure.out_broadcast:
self._assert_constant(j=j, axis=i)
@test_utils.parameters(
dict(
primitive=primitive,
shape=shape,
dtype=dtype,
params=params,
)
for shape in _SHAPES for dtype in _DTYPES
for primitive in _UNARY_PRIMITIVES.keys()
for params in _UNARY_PRIMITIVES[primitive](shape, dtype)
)
def test_unary(self, primitive: Optional[Primitive], shape, dtype, params):
if primitive == lax.device_put_p:
# Can't instantiate devices at test generation time; using subtests.
devices = [None] + jax.devices() + jax.devices('cpu')
for device in devices:
for src in devices:
with self.subTest(device=device, src=src):
params = {'device': device, 'src': src}
self._test_primitive(primitive, [shape], dtype, params)
else:
self._test_primitive(primitive, [shape], dtype, params)
@test_utils.parameters(
dict(
primitive=primitive,
shape1=shape1,
shape2=shape2,
dtype=dtype,
params=params
)
for shape1 in _SHAPES
for shape2 in _SHAPES
for dtype in _DTYPES
for primitive in _BINARY_PRIMITIVES.keys()
for params in _BINARY_PRIMITIVES[primitive](shape1, shape2)
)
def test_binary(
self,
primitive: Optional[Primitive],
shape1,
shape2,
dtype,
params
):
# TODO(romann): revisit when bugs below are fixed.
if primitive == lax.conv_general_dilated_p:
if jax.default_backend() == 'tpu':
raise absltest.SkipTest('http://b/235167364')
elif jax.default_backend() == 'gpu' and params['batch_group_count'] != 1:
raise absltest.SkipTest('http://b/235485533')
if len(shape1) > 3 or len(shape2) > 3:
test_utils.skip_test(self)
self._test_primitive(primitive, [shape1, shape2], dtype, params)
@test_utils.parameters(
dict(
primitive=primitive,
shapes=shapes,
dtype=dtype,
params=params
)
for shapes in _concat_shapes(4, *_SHAPES)
for dtype in _DTYPES
for primitive in _N_ARY_PRIMITIVES.keys()
for params in _N_ARY_PRIMITIVES[primitive](*shapes)
)
def test_n_ary(self, primitive: Optional[Primitive], shapes, dtype, params):
self._test_primitive(primitive, shapes, dtype, params)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 18,744 | 27.927469 | 80 | py |
neural-tangents | neural-tangents-main/tests/function_space_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/function_space.py`."""
from absl.testing import absltest
from jax.config import config
from examples import function_space
from tests import test_utils
# Parse absl flags and make silent rank promotion an error so tests fail
# loudly on accidental broadcasting.
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class FunctionSpaceTest(test_utils.NeuralTangentsTestCase):
  """Smoke test for the `examples/function_space.py` example script."""
  def test_function_space(self):
    """Runs the example end-to-end; any raised exception fails the test."""
    function_space.main(None)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 1,010 | 27.083333 | 74 | py |
neural-tangents | neural-tangents-main/tests/weight_space_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/weight_space.py`."""
from absl.testing import absltest
from jax.config import config
from examples import weight_space
from tests import test_utils
# Parse absl flags and make silent rank promotion an error so tests fail
# loudly on accidental broadcasting.
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class WeightSpaceTest(test_utils.NeuralTangentsTestCase):
  """Smoke test for the `examples/weight_space.py` example script."""
  def test_weight_space(self):
    """Runs the example end-to-end; any raised exception fails the test."""
    weight_space.main(None)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 999 | 27.571429 | 74 | py |
neural-tangents | neural-tangents-main/tests/imdb_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/imdb.py`."""
from absl.testing import absltest
from jax.config import config
from examples import imdb
from tests import test_utils
# Parse absl flags before tests run.
config.parse_flags_with_absl()
class ImdbTest(test_utils.NeuralTangentsTestCase):
  """Smoke test for the `examples/imdb.py` example script."""
  def test_imdb(self):
    """Runs the example on dummy data; any raised exception fails the test."""
    imdb.main(use_dummy_data=True)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 925 | 25.457143 | 74 | py |
neural-tangents | neural-tangents-main/tests/batching_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/batching.py`."""
from absl.testing import absltest
from functools import partial
from jax import jit
from jax.config import config
import jax.numpy as np
from jax import random
from jax.tree_util import tree_map
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src import batching
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
from tests import test_utils
# Parse absl flags and make silent rank promotion an error so tests fail
# loudly on accidental broadcasting.
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
# Network-topology identifiers used by `_build_network`.
FLAT = 'FLAT'
POOLING = 'POOLING'
INTERMEDIATE_CONV = 'INTERMEDIATE_CONV'
# TODO(schsam): Add a pooling test when multiple inputs are supported in
# Conv + Pooling.
# Parameter sweeps for the `@test_utils.product`-decorated tests below.
TRAIN_SIZES = [2, 4, 8]
TEST_SIZES = [2, 16]
INPUT_SHAPES = [(16,), (4, 4, 3)]
NETWORK = [FLAT, INTERMEDIATE_CONV, POOLING]
OUTPUT_LOGITS = [1, 2, 3]
# Small widths keep the tests fast; these are not meant to be realistic.
CONVOLUTION_CHANNELS = 2
WIDTH = 2
RTOL = 1e-2
test_utils.update_test_tolerance(f64_tol=5e-5)
def _build_network(input_shape, network, out_logits, use_dropout):
  """Builds a stax `(init_fn, apply_fn, kernel_fn)` triple for the tests.

  Args:
    input_shape: per-example input shape; rank 1 (flat) or rank 3 (image).
    network: one of `FLAT`, `POOLING`, or `INTERMEDIATE_CONV`.
    out_logits: number of output logits of the readout layer.
    use_dropout: whether to insert a train-mode `Dropout` layer.

  Returns:
    A stax layer tuple: a single convolution for `INTERMEDIATE_CONV`,
    otherwise a full network built with `stax.serial`.

  Raises:
    absltest.SkipTest: for non-`FLAT` topologies on rank-1 inputs.
    ValueError: on an unknown `network` or unsupported input rank.
  """
  if use_dropout:
    dropout = stax.Dropout(0.9, mode='train')
  else:
    dropout = stax.Identity()
  rank = len(input_shape)
  if rank == 1:
    # Fully-connected inputs only support the FLAT topology.
    if network != FLAT:
      raise absltest.SkipTest('Not testing pooling on FCN inputs.')
    return stax.serial(
        stax.Dense(WIDTH, W_std=2.0, b_std=0.5),
        dropout,
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  if rank != 3:
    raise ValueError('Expected flat or image test input.')
  if network == POOLING:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (2, 2), W_std=2.0, b_std=0.05),
        stax.GlobalAvgPool(),
        dropout,
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  if network == FLAT:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (2, 2), W_std=2.0, b_std=0.05),
        stax.Flatten(),
        dropout,
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  if network == INTERMEDIATE_CONV:
    return stax.Conv(CONVOLUTION_CHANNELS, (2, 2), W_std=2.0, b_std=0.05)
  raise ValueError('Unexpected network type found: {}'.format(network))
def _empirical_kernel(key, input_shape, network, out_logits, use_dropout):
  """Returns a jitted empirical NTK function with parameters bound.

  Initializes the network described by `(input_shape, network, out_logits,
  use_dropout)` and returns the empirical NTK of that network, partially
  applied with the drawn `params` and a dropout PRNG key.
  """
  init_fn, apply_fn, _ = _build_network(
      input_shape, network, out_logits, use_dropout)
  key, dropout_key = random.split(key)
  _, params = init_fn(key, (-1,) + input_shape)
  ntk_fn = nt.empirical_ntk_fn(
      apply_fn,
      implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION)
  return partial(jit(ntk_fn), params=params, keys=dropout_key)
def _theoretical_kernel(unused_key, input_shape, network, just_theta,
                        use_dropout):
  """Returns a jitted analytic (infinite-width) kernel function.

  If `just_theta` is `True` the returned function computes only the NTK;
  otherwise it returns the full `Kernel` object (all fields).
  """
  _, _, analytic_kernel_fn = _build_network(input_shape, network, 1,
                                            use_dropout)
  # `just_theta` is fixed per call, so the requested `get` can be chosen once.
  get = 'ntk' if just_theta else None
  @jit
  def kernel_fn(x1, x2=None):
    return analytic_kernel_fn(x1, x2, get)
  return kernel_fn
# Registry of kernel-fn factories keyed by a human-readable name. Each entry
# is a callable `(key, input_shape, network, **kwargs) -> kernel_fn` built
# from the helpers above.
KERNELS = {}
for o in OUTPUT_LOGITS:
  KERNELS['empirical_logits_{}'.format(o)] = partial(
      _empirical_kernel, out_logits=o, use_dropout=False)
KERNELS['theoretical'] = partial(
    _theoretical_kernel, just_theta=True, use_dropout=True)
KERNELS['theoretical_pytree'] = partial(
    _theoretical_kernel, just_theta=False, use_dropout=True)
def _test_kernel_against_batched(
cls,
kernel_fn,
batched_kernel_fn,
train,
test,
is_parallel_only=False
):
g = kernel_fn(train, None)
g_b = batched_kernel_fn(train, None)
if is_parallel_only and hasattr(g_b, 'x1_is_x2'):
# In the parallel setting, `x1_is_x2` is not computed correctly when x1==x2.
g_b = g_b.replace(x1_is_x2=g.x1_is_x2)
cls.assertAllClose(g, g_b)
g = kernel_fn(train, test)
g_b = batched_kernel_fn(train, test)
if is_parallel_only and hasattr(g_b, 'x1_is_x2'):
g_b = g_b.replace(x1_is_x2=g.x1_is_x2)
cls.assertAllClose(g, g_b)
class BatchTest(test_utils.NeuralTangentsTestCase):
  """Checks batched kernel functions against their unbatched counterparts.

  Covers `batching._serial`, `batching._parallel`, their composition, the
  automatic `batching.batch`, and the `_jit_or_pmap_broadcast` helper, for
  both empirical and analytic kernels, including pytree inputs.
  """
  @classmethod
  def _get_data_and_kernel_fn(
      self,
      input_shape,
      kernel_type,
      network,
      test_size,
      train_size,
      **kwargs
  ):
    """Returns `(test_data, train_data, kernel_fn)` for a `KERNELS` entry.

    NOTE(review): this is a `@classmethod` whose first parameter is named
    `self` rather than `cls` — it works, but consider renaming for clarity.
    """
    test_utils.stub_out_pmap(batching, 2)
    key = random.PRNGKey(0)
    key, self_split, other_split = random.split(key, 3)
    data_self = random.normal(self_split, (train_size, *input_shape))
    data_other = random.normal(other_split, (test_size, *input_shape))
    kernel_fn = KERNELS[kernel_type]
    kernel_fn = kernel_fn(key, input_shape, network, **kwargs)
    return data_other, data_self, kernel_fn
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      kernel_type=list(KERNELS.keys()),
      batch_size=[2, 8]
  )
  def testSerial(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      kernel_type,
      batch_size
  ):
    """`_serial` batching must match the unbatched kernel exactly."""
    data_other, data_self, kernel_fn = self._get_data_and_kernel_fn(
        input_shape,
        kernel_type,
        network,
        test_size,
        train_size
    )
    kernel_batched = batching._serial(kernel_fn, batch_size=batch_size)
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other)
  # We also exclude tests for dropout + parallel. It is not clear what is the
  # best way to handle this case.
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      kernel_type=list(KERNELS.keys()),
  )
  def testParallel(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      kernel_type,
  ):
    """`_parallel` batching must match the unbatched kernel (modulo `x1_is_x2`)."""
    data_other, data_self, kernel_fn = self._get_data_and_kernel_fn(
        input_shape,
        kernel_type,
        network,
        test_size,
        train_size,
        use_dropout=False
    )
    kernel_batched = batching._parallel(kernel_fn)
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other, True)
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      kernel_type=list(KERNELS.keys()),
      batch_size=[2, 8]
  )
  def testComposition(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      kernel_type,
      batch_size
  ):
    """Serial-of-parallel and parallel-of-serial both match the unbatched fn."""
    data_other, data_self, kernel_fn = self._get_data_and_kernel_fn(input_shape,
                                                                    kernel_type,
                                                                    network,
                                                                    test_size,
                                                                    train_size)
    kernel_batched = batching._parallel(
        batching._serial(kernel_fn, batch_size=batch_size))
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other)
    kernel_batched = batching._serial(
        batching._parallel(kernel_fn), batch_size=batch_size)
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other)
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      kernel_type=list(KERNELS.keys()),
      batch_size=[2, 8]
  )
  def testAutomatic(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      kernel_type,
      batch_size
  ):
    """`batching.batch` must match the unbatched kernel, on and off device."""
    data_other, data_self, kernel_fn = self._get_data_and_kernel_fn(
        input_shape,
        kernel_type,
        network,
        test_size,
        train_size
    )
    kernel_batched = batching.batch(kernel_fn, batch_size=batch_size)
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other)
    kernel_batched = batching.batch(
        kernel_fn, batch_size=batch_size, store_on_device=False)
    _test_kernel_against_batched(self, kernel_fn, kernel_batched, data_self,
                                 data_other)
  def _test_analytic_kernel_composition(self, batching_fn):
    """Composing batched kernel fns must equal the kernel of composed layers."""
    # Check Fully-Connected.
    rng = random.PRNGKey(0)
    rng_self, rng_other = random.split(rng)
    x_self = random.normal(rng_self, (8, 2))
    x_other = random.normal(rng_other, (2, 2))
    Block = stax.serial(stax.Dense(256), stax.Relu())
    _, _, ker_fn = Block
    ker_fn = batching_fn(ker_fn)
    _, _, composed_ker_fn = stax.serial(Block, Block)
    ker_out = ker_fn(ker_fn(x_self))
    composed_ker_out = composed_ker_fn(x_self)
    if batching_fn == batching._parallel:
      # In the parallel setting, `x1_is_x2` is not computed correctly
      # when x1==x2.
      composed_ker_out = composed_ker_out.replace(x1_is_x2=ker_out.x1_is_x2)
    self.assertAllClose(ker_out, composed_ker_out)
    ker_out = ker_fn(ker_fn(x_self, x_other))
    composed_ker_out = composed_ker_fn(x_self, x_other)
    if batching_fn == batching._parallel:
      composed_ker_out = composed_ker_out.replace(x1_is_x2=ker_out.x1_is_x2)
    self.assertAllClose(ker_out, composed_ker_out)
    # Check convolutional + pooling.
    x_self = random.normal(rng, (8, 4, 4, 3))
    x_other = random.normal(rng, (2, 4, 4, 3))
    Block = stax.serial(stax.Conv(256, (2, 2)), stax.Relu())
    Readout = stax.serial(stax.GlobalAvgPool(), stax.Dense(10))
    block_ker_fn, readout_ker_fn = Block[2], Readout[2]
    _, _, composed_ker_fn = stax.serial(Block, Readout)
    block_ker_fn = batching_fn(block_ker_fn)
    readout_ker_fn = batching_fn(readout_ker_fn)
    ker_out = readout_ker_fn(block_ker_fn(x_self))
    composed_ker_out = composed_ker_fn(x_self)
    if batching_fn == batching._parallel:
      composed_ker_out = composed_ker_out.replace(x1_is_x2=ker_out.x1_is_x2)
    self.assertAllClose(ker_out, composed_ker_out)
    ker_out = readout_ker_fn(block_ker_fn(x_self, x_other))
    composed_ker_out = composed_ker_fn(x_self, x_other)
    if batching_fn == batching._parallel:
      composed_ker_out = composed_ker_out.replace(x1_is_x2=ker_out.x1_is_x2)
    self.assertAllClose(ker_out, composed_ker_out)
  @test_utils.product(
      store_on_device=[True, False],
      batch_size=[2, 8]
  )
  def testAnalyticKernelComposeSerial(self, store_on_device, batch_size):
    """Composition property holds under `_serial` batching."""
    self._test_analytic_kernel_composition(
        partial(
            batching._serial,
            batch_size=batch_size,
            store_on_device=store_on_device))
  def testAnalyticKernelComposeParallel(self):
    """Composition property holds under `_parallel` batching."""
    test_utils.stub_out_pmap(batching, 2)
    self._test_analytic_kernel_composition(batching._parallel)
  @test_utils.product(
      store_on_device=[True, False],
      batch_size=[2, 8]
  )
  def testAnalyticKernelComposeAutomatic(self, store_on_device, batch_size):
    """Composition property holds under automatic `batching.batch`."""
    test_utils.stub_out_pmap(batching, 2)
    self._test_analytic_kernel_composition(
        partial(
            batching.batch, batch_size=batch_size,
            store_on_device=store_on_device))
  def test_jit_or_pmap_broadcast(self):
    """`_jit_or_pmap_broadcast` matches the raw fn for 0, 1, and 2 devices."""
    def kernel_fn(x1,
                  x2,
                  do_flip,
                  keys,
                  do_square,
                  params,
                  _unused=None,
                  p=0.65):
      res = np.abs(np.matmul(x1, x2))
      if do_square:
        res *= res
      if do_flip:
        res = -res
      res *= random.uniform(keys) * p
      return [res, params]
    params = (np.array([1., 0.3]), (np.array([1.2]), np.array([0.5])))
    x2 = np.arange(0, 10).reshape((10,))
    keys = random.PRNGKey(1)
    # device_count == 0: plain `jit`, outputs must match exactly.
    kernel_fn_pmapped = batching._jit_or_pmap_broadcast(kernel_fn,
                                                        device_count=0)
    x1 = np.arange(0, 10).reshape((1, 10))
    for do_flip in [True, False]:
      for do_square in [True, False]:
        with self.subTest(do_flip=do_flip, do_square=do_square, device_count=0):
          res_1 = kernel_fn(
              x1, x2, do_flip, keys, do_square, params, _unused=True, p=0.65)
          res_2 = kernel_fn_pmapped(
              x1, x2, do_flip, keys, do_square, params, _unused=True)
          self.assertAllClose(res_1, res_2)
    # device_count == 1 (stubbed pmap): `params` gain a leading device axis.
    test_utils.stub_out_pmap(batching, 1)
    x1 = np.arange(0, 10).reshape((1, 10))
    kernel_fn_pmapped = batching._jit_or_pmap_broadcast(kernel_fn,
                                                        device_count=1)
    for do_flip in [True, False]:
      for do_square in [True, False]:
        with self.subTest(do_flip=do_flip, do_square=do_square, device_count=1):
          res_1 = kernel_fn(
              x1, x2, do_flip, keys, do_square, params, _unused=False, p=0.65)
          res_2 = kernel_fn_pmapped(
              x1, x2, do_flip, keys, do_square, params, _unused=None)
          self.assertAllClose(res_1[0], res_2[0])
          self.assertAllClose(
              tree_map(partial(np.expand_dims, axis=0), res_1[1]), res_2[1])
    # device_count == 2 (stubbed pmap): `x1` is split across devices and
    # `params` are broadcast.
    kernel_fn_pmapped = batching._jit_or_pmap_broadcast(kernel_fn,
                                                        device_count=2)
    x1 = np.arange(0, 20).reshape((2, 10))
    test_utils.stub_out_pmap(batching, 2)
    def broadcast(arg):
      return np.broadcast_to(arg, (2,) + arg.shape)
    for do_flip in [True, False]:
      for do_square in [True, False]:
        with self.subTest(do_flip=do_flip, do_square=do_square, device_count=2):
          res_1 = kernel_fn(x1, x2, do_flip, keys, do_square, params, p=0.2)
          res_2 = kernel_fn_pmapped(
              x1, x2, do_flip, keys, do_square, params, _unused=None, p=0.2)
          self.assertAllClose(res_1[0][0], res_2[0][0])
          self.assertAllClose(res_1[0][1], res_2[0][1])
          self.assertAllClose(tree_map(broadcast, res_1[1]), res_2[1])
  @test_utils.product(
      same_inputs=[True, False]
  )
  def test_parallel_in_out(self, same_inputs):
    """Batching works on pytree (nested tuple) inputs for analytic kernels."""
    test_utils.stub_out_pmap(batching, 2)
    rng = random.PRNGKey(0)
    input_key1, input_key2 = random.split(rng, 2)
    x1_1, x1_2, x1_3 = random.normal(input_key1, (3, 4, 1))
    x1 = (x1_1, (x1_2, x1_3))
    if same_inputs:
      x2 = None
    else:
      x2_1, x2_2, x2_3 = random.normal(input_key2, (3, 8, 1))
      x2 = (x2_1, (x2_2, x2_3))
    N = WIDTH
    def net(N_out):
      return stax.parallel(stax.Dense(N_out),
                           stax.parallel(stax.Dense(N_out + 1),
                                         stax.Dense(N_out + 2)))
    # Check NNGP.
    readin = net(N)
    readout = net(1)
    K_readin_fn = jit(readin[2])
    K_readout_fn = jit(partial(readout[2], get='nngp'))
    batch_K_readin_fn = batching.batch(K_readin_fn, 2)
    batch_K_readout_fn = batching.batch(K_readout_fn, 2)
    test_utils.assert_close_matrices(
        self,
        K_readout_fn(K_readin_fn(x1, x2)),
        batch_K_readout_fn(batch_K_readin_fn(x1, x2)),
        RTOL)
    # Check Both.
    K_readin_fn = jit(readin[2])
    K_readout_fn = jit(partial(readout[2], get=('nngp', 'ntk')))
    batch_K_readin_fn = batching.batch(K_readin_fn, 2)
    batch_K_readout_fn = batching.batch(K_readout_fn, 2)
    test_utils.assert_close_matrices(
        self,
        K_readout_fn(K_readin_fn(x1, x2)),
        batch_K_readout_fn(batch_K_readin_fn(x1, x2)),
        RTOL)
  @test_utils.product(
      same_inputs=[True, False]
  )
  def test_parallel_in_out_empirical(self, same_inputs):
    """Batching works on pytree inputs for empirical NNGP/NTK kernels."""
    test_utils.stub_out_pmap(batching, 2)
    rng = random.PRNGKey(0)
    input_key1, input_key2, net_key = random.split(rng, 3)
    x1_1, x1_2, x1_3 = random.normal(input_key1, (3, 4, 1))
    x1 = (x1_1, (x1_2, x1_3))
    if same_inputs:
      x2 = None
    else:
      x2_1, x2_2, x2_3 = random.normal(input_key2, (3, 8, 1))
      x2 = (x2_1, (x2_2, x2_3))
    def net(N_out):
      return stax.parallel(stax.Dense(N_out),
                           stax.parallel(stax.Dense(N_out + 1),
                                         stax.Dense(N_out + 2)))
    # Check NNGP.
    init_fn, apply_fn, _ = net(WIDTH)
    _, params = init_fn(net_key, ((-1, 1), ((-1, 1), (-1, 1))))
    kernel_fn = jit(nt.empirical_nngp_fn(apply_fn))
    batch_kernel_fn = jit(batching.batch(kernel_fn, 2))
    test_utils.assert_close_matrices(
        self,
        kernel_fn(x1, x2, params),
        batch_kernel_fn(x1, x2, params),
        RTOL)
    # Check NTK.
    init_fn, apply_fn, _ = stax.serial(net(WIDTH), net(1))
    _, params = init_fn(net_key, ((-1, 1), ((-1, 1), (-1, 1))))
    kernel_fn = jit(nt.empirical_ntk_fn(
        apply_fn,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION))
    batch_kernel_fn = jit(batching.batch(kernel_fn, 2))
    test_utils.assert_close_matrices(
        self,
        kernel_fn(x1, x2, params),
        batch_kernel_fn(x1, x2, params),
        RTOL)
  @test_utils.product(
      same_inputs=[True, False],
      device_count=[-1, 0, 1, 2],
      trace_axes=[(-1,), (1, -1), ()],
      diagonal_axes=[(1,), (), (1, -1)]
  )
  def test_empirical_ntk_diagonal_outputs(
      self,
      same_inputs,
      device_count,
      trace_axes,
      diagonal_axes
  ):
    """Batching respects custom `trace_axes`/`diagonal_axes` of empirical NTK."""
    if any (t in diagonal_axes for t in trace_axes):
      raise absltest.SkipTest('Overlapping axes.')
    test_utils.stub_out_pmap(batching, 2)
    rng = random.PRNGKey(0)
    input_key1, input_key2, net_key = random.split(rng, 3)
    init_fn, apply_fn, _ = stax.serial(stax.Dense(5),
                                       stax.Relu(),
                                       stax.Dense(3))
    test_x1 = random.normal(input_key1, (12, 4, 4))
    test_x2 = None
    # NOTE(review): this looks inverted relative to the other tests in this
    # class, where `same_inputs=True` means `x2 is None` — confirm intent.
    if same_inputs:
      test_x2 = random.normal(input_key2, (9, 4, 4))
    kernel_fn = nt.empirical_ntk_fn(
        apply_fn,
        trace_axes=trace_axes,
        diagonal_axes=diagonal_axes,
        vmap_axes=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    _, params = init_fn(net_key, test_x1.shape)
    true_kernel = kernel_fn(test_x1, test_x2, params)
    batched_fn = batching.batch(kernel_fn, device_count=device_count,
                                batch_size=3)
    batch_kernel = batched_fn(test_x1, test_x2, params)
    self.assertAllClose(true_kernel, batch_kernel)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 18,750 | 31.107877 | 80 | py |
neural-tangents | neural-tangents-main/tests/predict_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/predict.py`."""
import math
from absl.testing import absltest
from jax import grad
from jax import jit
from jax import random
from jax import vmap
from jax.config import config
from jax.example_libraries import optimizers
from jax.flatten_util import ravel_pytree
import jax.numpy as np
import jax.tree_util
import neural_tangents as nt
from neural_tangents import predict, stax
from neural_tangents._src.predict import _is_on_cpu
from tests import test_utils
# Parse absl flags and make silent rank promotion an error so tests fail
# loudly on accidental broadcasting.
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
# Numbers of output logits to sweep over.
OUTPUT_LOGITS = [2]
GETS = ('ntk', 'nngp', ('ntk', 'nngp'))
# Comparison tolerances; loosened when running in float32 (x64 disabled).
RTOL = 0.01
ATOL = 0.01
if not config.read('jax_enable_x64'):
  RTOL = 0.02
  ATOL = 0.02
# Network-topology identifiers used by `_build_network`.
FLAT = 'FLAT'
POOLING = 'POOLING'
# TODO(schsam): Add a pooling test when multiple inputs are supported in
# Conv + Pooling.
# Parameter sweeps for the `@test_utils.product`-decorated tests below.
TRAIN_SIZES = [4, 8]
TEST_SIZES = [6, 2]
INPUT_SHAPES = [(8,), (4, 4, 3)]
NETWORK = [FLAT, FLAT]
CONVOLUTION_CHANNELS = 256
test_utils.update_test_tolerance()
def _build_network(input_shape, network, out_logits):
  """Builds a stax `(init_fn, apply_fn, kernel_fn)` network for the tests.

  Args:
    input_shape: per-example input shape; rank 1 (flat) or rank 3 (image).
    network: `FLAT` for a dense/flattened readout; `POOLING` for a
      global-average-pool readout (rank-3 inputs only).
    out_logits: number of output logits of the readout layer.

  Returns:
    A stax `(init_fn, apply_fn, kernel_fn)` tuple.

  Raises:
    ValueError: on an unknown `network` or unsupported input rank.
  """
  if len(input_shape) == 1:
    # Was `assert network == FLAT`: a plain `assert` is stripped under `-O`,
    # so validate explicitly, consistently with the rank-3 branch below.
    if network != FLAT:
      raise ValueError('Unexpected network type found: {}'.format(network))
    return stax.serial(
        stax.Dense(4096, W_std=1.2, b_std=0.05), stax.Erf(),
        stax.Dense(out_logits, W_std=1.2, b_std=0.05))
  elif len(input_shape) == 3:
    if network == POOLING:
      return stax.serial(
          stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
          stax.GlobalAvgPool(), stax.Dense(out_logits, W_std=2.0, b_std=0.05))
    elif network == FLAT:
      return stax.serial(
          stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
          stax.Flatten(), stax.Dense(out_logits, W_std=2.0, b_std=0.05))
    else:
      raise ValueError('Unexpected network type found: {}'.format(network))
  else:
    raise ValueError('Expected flat or image test input.')
def _empirical_kernel(key, input_shape, network, out_logits):
  """Returns `(params, apply_fn, kernel_fn)` for the empirical kernel.

  The returned `kernel_fn(x1, x2, get)` computes the empirical kernel of the
  initialized network with the drawn `params` already bound.
  """
  init_fn, apply_fn, _ = _build_network(input_shape, network, out_logits)
  _, params = init_fn(key, (-1,) + input_shape)
  raw_kernel_fn = nt.empirical_kernel_fn(apply_fn, trace_axes=(), vmap_axes=0)
  def kernel_fn(x1, x2, get):
    return raw_kernel_fn(x1, x2, get, params)
  return params, apply_fn, jit(kernel_fn, static_argnames='get')
def _theoretical_kernel(key, input_shape, network, out_logits):
  """Returns `(params, apply_fn, kernel_fn)` for the analytic kernel."""
  init_fn, apply_fn, analytic_kernel_fn = _build_network(
      input_shape, network, out_logits)
  _, params = init_fn(key, (-1,) + input_shape)
  return params, apply_fn, jit(analytic_kernel_fn, static_argnames='get')
# Registry mapping a human-readable name to the kernel-fn factory
# `(key, input_shape, network, out_logits) -> (params, apply_fn, kernel_fn)`.
KERNELS = {
    'empirical': _empirical_kernel,
    'theoretical': _theoretical_kernel,
}
class PredictTest(test_utils.NeuralTangentsTestCase):
def _test_zero_time(self, predictor, fx_train_0, fx_test_0, g_td, momentum):
fx_train_t0, fx_test_t0 = predictor(0.0, fx_train_0, fx_test_0, g_td)
self.assertAllClose(fx_train_0, fx_train_t0)
self.assertAllClose(fx_test_0, fx_test_t0)
fx_train_only_t0 = predictor(0.0, fx_train_0, None, g_td)
self.assertAllClose(fx_train_0, fx_train_only_t0)
if momentum is not None:
# Test state-based prediction
state_0 = predict.ODEState(fx_train_0, fx_test_0) # pytype:disable=wrong-arg-count
state_t0 = predictor(0.0, state_0, None, g_td)
self.assertAllClose(state_0.fx_train, state_t0.fx_train)
self.assertAllClose(state_0.fx_test, state_t0.fx_test)
state_train_only_0 = predict.ODEState(fx_train_0) # pytype:disable=wrong-arg-count
state_train_only_t0 = predictor(0.0, state_0, None, g_td)
self.assertAllClose(state_train_only_0.fx_train,
state_train_only_t0.fx_train)
def _test_inf_time(self, predictor, fx_train_0, fx_test_0, g_td, y_train):
# Test infinite-time prediction
pr_inf = predictor(np.inf, fx_train_0)
self.assertAllClose(pr_inf, y_train, check_dtypes=False)
self.assertAllClose(pr_inf, predictor(None, fx_train_0))
self.assertAllClose(predictor(np.inf, fx_train_0, fx_test_0, g_td),
predictor(None, fx_train_0, fx_test_0, g_td))
def _test_multi_step(self, predictor, fx_train_0, fx_test_0, g_td, momentum):
# Test multi-time prediction
ts = np.arange(6).reshape((2, 1, 3))
fx_train_single, fx_test_single = predictor(ts, fx_train_0, fx_test_0, g_td)
fx_train_concat, fx_test_concat = [], []
for t in ts.ravel():
fx_train_concat_t, fx_test_concat_t = predictor(t, fx_train_0, fx_test_0,
g_td)
fx_train_concat += [fx_train_concat_t]
fx_test_concat += [fx_test_concat_t]
fx_train_concat = np.stack(fx_train_concat).reshape(
ts.shape + fx_train_single.shape[ts.ndim:])
fx_test_concat = np.stack(fx_test_concat).reshape(
ts.shape + fx_test_single.shape[ts.ndim:])
self.assertAllClose(fx_train_concat, fx_train_single)
self.assertAllClose(fx_test_concat, fx_test_single)
if momentum is not None:
state_0 = predict.ODEState(fx_train_0, fx_test_0) # pytype:disable=wrong-arg-count
t_1 = (0, 0, 2)
state_1 = predictor(ts[t_1], state_0, None, g_td)
self.assertAllClose(fx_train_single[t_1], state_1.fx_train)
self.assertAllClose(fx_test_single[t_1], state_1.fx_test)
t_max = (-1,) * ts.ndim
state_max = predictor(ts[t_max] - ts[t_1], state_1, None, g_td)
self.assertAllClose(fx_train_single[t_max], state_max.fx_train)
self.assertAllClose(fx_test_single[t_max], state_max.fx_test)
@classmethod
def _get_inputs(cls, out_logits, test_shape, train_shape):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = random.normal(split, train_shape)
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = random.normal(split, test_shape)
return key, x_test, x_train, y_train
@test_utils.product(
train_size=TRAIN_SIZES,
test_size=TEST_SIZES,
input_shape=INPUT_SHAPES,
network=NETWORK,
out_logits=OUTPUT_LOGITS,
kernel_type=list(KERNELS.keys()),
momentum=[None, 0.9],
learning_rate=[0.0002],
t=[5],
loss=['mse_analytic', 'mse'],
)
def testNTKGDPrediction(
self,
train_size,
test_size,
input_shape,
network,
out_logits,
kernel_type,
momentum,
learning_rate,
t,
loss
):
train_shape = (train_size, *input_shape)
test_shape = (test_size, *input_shape)
key, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
train_shape)
fn_and_kernel = KERNELS[kernel_type]
params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
g_dd = ntk(x_train, None, 'ntk')
g_td = ntk(x_test, x_train, 'ntk')
# Regress to an MSE loss.
loss_fn = lambda y, y_hat: 0.5 * np.mean((y - y_hat)**2)
grad_loss = jit(grad(lambda params, x: loss_fn(f(params, x), y_train)))
trace_axes = () if g_dd.ndim == 4 else (-1,)
if loss == 'mse_analytic':
if momentum is not None:
raise absltest.SkipTest(momentum)
predictor = predict.gradient_descent_mse(g_dd, y_train,
learning_rate=learning_rate,
trace_axes=trace_axes)
elif loss == 'mse':
predictor = predict.gradient_descent(loss_fn, g_dd, y_train,
learning_rate=learning_rate,
momentum=momentum,
trace_axes=trace_axes)
else:
raise NotImplementedError(loss)
predictor = jit(predictor)
fx_train_0 = f(params, x_train)
fx_test_0 = f(params, x_test)
self._test_zero_time(predictor, fx_train_0, fx_test_0, g_td, momentum)
self._test_multi_step(predictor, fx_train_0, fx_test_0, g_td, momentum)
if loss == 'mse_analytic':
self._test_inf_time(predictor, fx_train_0, fx_test_0, g_td, y_train)
if momentum is None:
opt_init, opt_update, get_params = optimizers.sgd(learning_rate)
else:
opt_init, opt_update, get_params = optimizers.momentum(learning_rate,
momentum)
opt_state = opt_init(params)
for i in range(t):
params = get_params(opt_state)
opt_state = opt_update(i, grad_loss(params, x_train), opt_state)
params = get_params(opt_state)
fx_train_nn, fx_test_nn = f(params, x_train), f(params, x_test)
fx_train_t, fx_test_t = predictor(t, fx_train_0, fx_test_0, g_td)
self.assertAllClose(fx_train_nn, fx_train_t, rtol=RTOL, atol=ATOL)
self.assertAllClose(fx_test_nn, fx_test_t, rtol=RTOL, atol=ATOL)
@classmethod
def _cov_empirical(cls, x):
return np.einsum('itjk,itlk->tjl', x, x, optimize=True) / (x.shape[0] *
x.shape[-1])
  @test_utils.product(
      train_size=TRAIN_SIZES[:1],
      test_size=TEST_SIZES[:1],
      input_shape=INPUT_SHAPES[:1],
      out_logits=[1],
  )
  def testNTKMeanCovPrediction(
      self,
      train_size,
      test_size,
      input_shape,
      out_logits,
  ):
    """Checks the analytic NTK mean/covariance against a Monte Carlo ensemble.

    Analytic predictions from `gradient_descent_mse_ensemble` at several
    times `ts` are compared with the empirical mean and covariance of 4096
    finite-width empirical-NTK predictions.
    """
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    key, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                     train_shape)
    init_fn, f, kernel_fn = stax.serial(
        stax.Dense(512, W_std=1.2, b_std=0.05), stax.Erf(),
        stax.Dense(out_logits, W_std=1.2, b_std=0.05))

    reg = 1e-6
    predictor = predict.gradient_descent_mse_ensemble(kernel_fn, x_train,
                                                      y_train, diag_reg=reg)
    ts = np.array([1., 5., 10.])

    fx_test_inf, cov_test_inf = predictor(ts, x_test, 'ntk', True)
    self.assertEqual(cov_test_inf.shape[1], x_test.shape[0])
    # Covariances must be positive semi-definite up to numerical error.
    self.assertGreater(np.min(np.linalg.eigh(cov_test_inf)[0]), -1e-8)

    fx_train_inf, cov_train_inf = predictor(ts, None, 'ntk', True)
    self.assertEqual(cov_train_inf.shape[1], x_train.shape[0])
    self.assertGreater(np.min(np.linalg.eigh(cov_train_inf)[0]), -1e-8)

    _kernel_fn = nt.empirical_kernel_fn(f)
    kernel_fn = jit(lambda x1, x2, params: _kernel_fn(x1, x2, 'ntk', params))

    def predict_empirical(key):
      # One finite-width draw: empirical-NTK prediction at times `ts`.
      _, params = init_fn(key, train_shape)
      g_dd = kernel_fn(x_train, None, params)
      g_td = kernel_fn(x_test, x_train, params)
      predict_fn = predict.gradient_descent_mse(g_dd, y_train, diag_reg=reg)
      fx_train_0 = f(params, x_train)
      fx_test_0 = f(params, x_test)
      return predict_fn(ts, fx_train_0, fx_test_0, g_td)

    def predict_mc(count, key):
      # Monte Carlo mean/covariance over `count` random initializations.
      key = random.split(key, count)
      fx_train, fx_test = vmap(predict_empirical)(key)
      fx_train_mean = np.mean(fx_train, axis=0, keepdims=True)
      fx_test_mean = np.mean(fx_test, axis=0, keepdims=True)

      fx_train_centered = fx_train - fx_train_mean
      fx_test_centered = fx_test - fx_test_mean

      cov_train = PredictTest._cov_empirical(fx_train_centered)
      cov_test = PredictTest._cov_empirical(fx_test_centered)

      return fx_train_mean, fx_test_mean, cov_train, cov_test

    fx_train_mc, fx_test_mc, cov_train_mc, cov_test_mc = predict_mc(4096, key)

    tol = 0.05
    # Compare flattened pytrees so all leaves are checked at once.
    assert_close = lambda a, b: self.assertAllClose(ravel_pytree(a)[0],
                                                    ravel_pytree(b)[0],
                                                    atol=tol,
                                                    rtol=tol)
    assert_close(fx_train_mc, fx_train_inf)
    assert_close(cov_train_mc, cov_train_inf)
    assert_close(cov_test_mc, cov_test_inf)
    assert_close(fx_test_mc, fx_test_inf)
  @test_utils.product(
      train_size=TRAIN_SIZES[:-1],
      test_size=TEST_SIZES[:-1],
      input_shape=INPUT_SHAPES,
      network=NETWORK[:-1],
      out_logits=OUTPUT_LOGITS,
  )
  def testGradientDescentMseEnsembleGet(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      out_logits,
  ):
    """Tests the `get` argument handling of `gradient_descent_mse_ensemble`.

    String `get` values must yield a single `predict.Gaussian`, tuple values
    a tuple of Gaussians in the requested order.
    """
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    _, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                   train_shape)
    _, _, kernel_fn = _build_network(train_shape[1:], network, out_logits)
    predictor = predict.gradient_descent_mse_ensemble(kernel_fn,
                                                      x_train,
                                                      y_train,
                                                      diag_reg=0.)
    for x in [None, 'x_test']:
      with self.subTest(x=x):
        x = x if x is None else x_test

        out = predictor(None, x, 'ntk', compute_cov=True)
        assert isinstance(out, predict.Gaussian)

        out = predictor(1., x, 'nngp', compute_cov=True)
        assert isinstance(out, predict.Gaussian)

        out = predictor(np.array([0., 1.]), x, ('ntk',), compute_cov=True)
        assert len(out) == 1 and isinstance(out[0], predict.Gaussian)

        out = predictor(2., x, ('ntk', 'nngp'), compute_cov=True)
        assert (len(out) == 2 and isinstance(out[0], predict.Gaussian) and
                isinstance(out[1], predict.Gaussian))

        # Swapping the order of `get` must swap the outputs accordingly.
        out2 = predictor(2., x, ('nngp', 'ntk'), compute_cov=True)
        self.assertAllClose(out[0], out2[1])
        self.assertAllClose(out[1], out2[0])
  @test_utils.product(
      train_size=TRAIN_SIZES[:-1],
      test_size=TEST_SIZES[:-1],
      input_shape=INPUT_SHAPES[:-1],
      network=NETWORK[:-1],
      out_logits=OUTPUT_LOGITS,
      get=GETS
  )
  def testInfiniteTimeAgreement(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      out_logits,
      get
  ):
    """Checks that `t=np.inf` and `t=None` predictions coincide."""
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    _, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                   train_shape)
    _, _, kernel_fn = _build_network(train_shape[1:], network, out_logits)

    reg = 0.
    predictor = predict.gradient_descent_mse_ensemble(kernel_fn,
                                                      x_train,
                                                      y_train,
                                                      diag_reg=reg)

    for x in (None, 'x_test'):
      with self.subTest(x=x):
        x = x if x is None else x_test
        fin = predictor(t=np.inf, x_test=x, get=get, compute_cov=True)
        inf = predictor(t=None, x_test=x, get=get, compute_cov=True)
        self.assertAllClose(inf, fin)

        if x is None:
          # `x_test=None` must agree with explicitly passing the train set.
          fin_x = predictor(t=np.inf, x_test=x_train, get=get, compute_cov=True)
          inf_x = predictor(t=None, x_test=x_train, get=get, compute_cov=True)
          self.assertAllClose(inf, inf_x)
          self.assertAllClose(inf_x, fin_x)
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      out_logits=OUTPUT_LOGITS,
  )
  def testZeroTimeAgreement(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      out_logits,
  ):
    """Test that the NTK and NNGP agree at t=0."""
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    _, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                   train_shape)
    _, _, ker_fun = _build_network(train_shape[1:], network, out_logits)

    reg = 1e-7
    predictor = predict.gradient_descent_mse_ensemble(
        ker_fun,
        x_train,
        y_train,
        diag_reg=reg)

    for x in (None, 'x_test'):
      with self.subTest(x=x):
        x = x if x is None else x_test
        # Upper-case `get` strings should also be accepted.
        zero = predictor(t=0.0, x_test=x, get=('NTK', 'NNGP'), compute_cov=True)

        # At t=0 the mean is zero and the covariance is the prior NNGP kernel.
        if x is None:
          k = ker_fun(x_train, None, get='nngp')
          ref = (np.zeros_like(y_train, k.dtype), k)
        else:
          ref = (np.zeros((test_shape[0], out_logits)),
                 ker_fun(x_test, None, get='nngp'))

        self.assertAllClose((ref,) * 2, zero, check_dtypes=False)

        if x is None:
          zero_x = predictor(t=0.0, x_test=x_train, get=('NTK', 'NNGP'),
                             compute_cov=True)
          self.assertAllClose((ref,) * 2, zero_x)
@classmethod
def _always_ntk(cls, ker_fun):
def always_ntk(x1, x2, get=('nngp', 'ntk')):
out = ker_fun(x1, x2, get=('nngp', 'ntk'))
if get == 'nngp' or get == 'ntk':
return out.ntk
else:
return out._replace(nngp=out.ntk)
return always_ntk
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      out_logits=OUTPUT_LOGITS,
  )
  def testNTK_NTKNNGPAgreement(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      out_logits,
  ):
    """Checks NTK/NNGP code-path agreement under hacked kernel functions.

    Feeding the NTK through the NNGP mean equations must reproduce the NTK
    mean, and feeding the NNGP through the NTK covariance equations must
    reproduce the NNGP covariance. Also checks broadcasting over
    array-valued times `t`.
    """
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    _, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                   train_shape)
    _, _, ker_fun = _build_network(train_shape[1:], network, out_logits)

    reg = 1e-7
    predictor = predict.gradient_descent_mse_ensemble(ker_fun,
                                                      x_train,
                                                      y_train,
                                                      diag_reg=reg)

    ts = np.logspace(-2, 8, 10).reshape((5, 2))

    for t in (None, 'ts'):
      for x in (None, 'x_test'):
        with self.subTest(t=t, x=x):
          x = x if x is None else x_test
          t = t if t is None else ts

          ntk = predictor(t=t, get='ntk', x_test=x)

          # Test time broadcasting
          if t is not None:
            ntk_ind = np.array([predictor(t=t, get='ntk', x_test=x)
                                for t in t.ravel()]).reshape(
                                    t.shape + ntk.shape[2:])
            self.assertAllClose(ntk_ind, ntk)

          always_ntk = self._always_ntk(ker_fun)
          predictor_ntk = predict.gradient_descent_mse_ensemble(always_ntk,
                                                                x_train,
                                                                y_train,
                                                                diag_reg=reg)

          ntk_nngp = predictor_ntk(t=t, get='nngp', x_test=x)

          # Test if you use nngp equations with ntk, you get the same mean
          self.assertAllClose(ntk, ntk_nngp)

          # Next test that if you go through the NTK code path, but with only
          # the NNGP kernel, we recreate the NNGP dynamics.
          # Create a hacked kernel function that always returns the nngp kernel
          def always_nngp(x1, x2, get=('nngp', 'ntk')):
            out = ker_fun(x1, x2, get=('nngp', 'ntk'))
            if get == 'nngp' or get == 'ntk':
              return out.nngp
            else:
              return out._replace(ntk=out.nngp)

          predictor_nngp = predict.gradient_descent_mse_ensemble(always_nngp,
                                                                 x_train,
                                                                 y_train,
                                                                 diag_reg=reg)

          nngp_cov = predictor(t=t,
                               get='nngp',
                               x_test=x,
                               compute_cov=True).covariance

          # test time broadcasting for covariance
          nngp_ntk_cov = predictor_nngp(t=t,
                                        get='ntk',
                                        x_test=x,
                                        compute_cov=True).covariance
          if t is not None:
            nngp_ntk_cov_ind = np.array(
                [predictor_nngp(t=t,
                                get='ntk',
                                x_test=x,
                                compute_cov=True).covariance for
                 t in t.ravel()]).reshape(t.shape + nngp_cov.shape[2:])
            self.assertAllClose(nngp_ntk_cov_ind, nngp_ntk_cov)

          # Test if you use ntk equations with nngp, you get the same cov
          # Although, due to accumulation of numerical errors, only roughly.
          self.assertAllClose(nngp_cov, nngp_ntk_cov)
  @test_utils.product(
      train_size=TRAIN_SIZES,
      test_size=TEST_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      out_logits=OUTPUT_LOGITS,
  )
  def testPredCovPosDef(
      self,
      train_size,
      test_size,
      input_shape,
      network,
      out_logits,
  ):
    """Checks that predicted covariances are symmetric and (nearly) PSD."""
    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    _, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                   train_shape)
    _, _, ker_fun = _build_network(train_shape[1:], network, out_logits)

    ts = np.logspace(-3, 3, 10)
    predict_fn_mse_ens = predict.gradient_descent_mse_ensemble(
        ker_fun, x_train, y_train)

    for get in ('nngp', 'ntk'):
      for x in (None, 'x_test'):
        for t in (None, 'ts'):
          with self.subTest(get=get, x=x, t=t):
            cov = predict_fn_mse_ens(t=t if t is None else ts,
                                     get=get,
                                     x_test=x if x is None else x_test,
                                     compute_cov=True).covariance

            # Symmetric in the last two axes...
            self.assertAllClose(cov, np.moveaxis(cov, -1, -2))
            # ...and with eigenvalues at most slightly negative (numerics).
            self.assertGreater(np.min(np.linalg.eigh(cov)[0]), -1e-4)
  @test_utils.product(
      train_size=TRAIN_SIZES[:1],
      test_size=TEST_SIZES[:1],
      input_shape=INPUT_SHAPES[:1],
      out_logits=[1],
  )
  def testTrainedEnsemblePredCov(
      self,
      train_size,
      test_size,
      input_shape,
      out_logits
  ):
    """Compares analytic NTK mean/covariance to an SGD-trained ensemble.

    Trains 1024 finite-width networks with plain SGD and checks that the
    empirical mean and covariance of their outputs match the analytic NTK
    prediction at the same number of steps.
    """
    training_steps = 1000
    learning_rate = 0.1
    ensemble_size = 1024

    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(128, W_std=1.2, b_std=0.05), stax.Erf(),
        stax.Dense(out_logits, W_std=1.2, b_std=0.05))

    opt_init, opt_update, get_params = optimizers.sgd(learning_rate)
    opt_update = jit(opt_update)

    train_shape = (train_size, *input_shape)
    test_shape = (test_size, *input_shape)
    key, x_test, x_train, y_train = self._get_inputs(out_logits, test_shape,
                                                     train_shape)
    predict_fn_mse_ens = predict.gradient_descent_mse_ensemble(
        kernel_fn,
        x_train,
        y_train,
        learning_rate=learning_rate,
        diag_reg=0.)

    train = (x_train, y_train)
    ensemble_key = random.split(key, ensemble_size)

    loss = jit(lambda params, x, y: 0.5 * np.mean((apply_fn(params, x) - y)**2))
    grad_loss = jit(lambda state, x, y: grad(loss)(get_params(state), x, y))

    def train_network(key):
      # Trains one ensemble member for `training_steps` SGD steps.
      _, params = init_fn(key, (-1,) + train_shape[1:])
      opt_state = opt_init(params)
      for i in range(training_steps):
        opt_state = opt_update(i, grad_loss(opt_state, *train), opt_state)
      return get_params(opt_state)

    params = vmap(train_network)(ensemble_key)

    tol = 0.08

    for x in [None, 'x_test']:
      with self.subTest(x=x):
        x = x if x is None else x_test
        x_fin = x_train if x is None else x_test
        ensemble_fx = vmap(apply_fn, (0, None))(params, x_fin)

        mean_emp = np.mean(ensemble_fx, axis=0, keepdims=True)
        mean_subtracted = ensemble_fx - mean_emp

        # Empirical covariance, contracting ensemble (0) and logit (-1) axes.
        cov_emp = np.einsum(
            'ijk,ilk->jl', mean_subtracted, mean_subtracted, optimize=True) / (
                mean_subtracted.shape[0] * mean_subtracted.shape[-1])

        ntk = predict_fn_mse_ens(training_steps, x, 'ntk', compute_cov=True)
        self.assertAllClose(ravel_pytree(mean_emp)[0],
                            ravel_pytree(ntk.mean)[0], rtol=tol, atol=tol)
        self.assertAllClose(cov_emp, ntk.covariance, rtol=tol, atol=tol)
  def testGradientDescentMseEnsembleTrain(self):
    """Checks `x_test=None` matches explicitly passing the train inputs."""
    key = random.PRNGKey(1)
    x = random.normal(key, (8, 4, 6, 3))
    _, _, kernel_fn = stax.serial(stax.Conv(1, (2, 2)),
                                  stax.Relu(),
                                  stax.Conv(1, (2, 1)))
    y = random.normal(key, (8, 2, 5, 1))

    predictor = predict.gradient_descent_mse_ensemble(kernel_fn, x, y,
                                                      diagonal_spatial=False)

    for t in [None, np.array([0., 1., 10.])]:
      with self.subTest(t=t):
        y_none = predictor(t, None, None, compute_cov=True)
        y_x = predictor(t, x, None, compute_cov=True)
        self.assertAllClose(y_none, y_x, rtol=0.04, atol=0.04)
  def testGpInference(self):
    """Checks `gp_inference` against `gradient_descent_mse_ensemble` at t=inf.

    Runs over analytic and empirical kernels, all `get` combinations, with
    and without test inputs and covariance computation, and validates the
    `'ntkgp'` posterior against the NNGP equations applied to a kernel
    function hacked to always return the NTK.
    """
    reg = 1e-5
    key = random.PRNGKey(1)
    x_train = random.normal(key, (4, 2))
    init_fn, apply_fn, kernel_fn_analytic = stax.serial(
        stax.Dense(32, 2., 0.5),
        stax.Relu(),
        stax.Dense(10, 2., 0.5))
    kernel_fn_empirical = nt.empirical_kernel_fn(apply_fn)
    y_train = random.normal(key, (4, 10))
    for kernel_fn_is_analytic in [True, False]:
      if kernel_fn_is_analytic:
        kernel_fn = kernel_fn_analytic
      else:
        _, params = init_fn(key, x_train.shape)
        def kernel_fn(x1, x2, get):
          return kernel_fn_empirical(x1, x2, get, params)

      for get in [None,
                  'nngp', 'ntk',
                  ('nngp',), ('ntk',),
                  ('nngp', 'ntk'), ('ntk', 'nngp')]:
        k_dd = kernel_fn(x_train, None, get)

        gp_inference = predict.gp_inference(k_dd, y_train, diag_reg=reg)
        gd_ensemble = predict.gradient_descent_mse_ensemble(kernel_fn,
                                                            x_train,
                                                            y_train,
                                                            diag_reg=reg)
        for x_test in [None, 'x_test']:
          x_test = None if x_test is None else random.normal(key, (8, 2))
          k_td = None if x_test is None else kernel_fn(x_test, x_train, get)

          for compute_cov in [True, False]:
            with self.subTest(kernel_fn_is_analytic=kernel_fn_is_analytic,
                              get=get,
                              x_test=x_test if x_test is None else 'x_test',
                              compute_cov=compute_cov):
              if compute_cov:
                nngp_tt = (True if x_test is None else
                           kernel_fn(x_test, None, 'nngp'))
              else:
                nngp_tt = None

              out_ens = gd_ensemble(None, x_test, get, compute_cov)
              out_ens_inf = gd_ensemble(np.inf, x_test, get, compute_cov)
              tol = 0.35 if jax.default_backend() == 'tpu' else 0.08
              self.assertAllClose(out_ens_inf, out_ens, rtol=tol, atol=tol)

              if (get is not None and
                  'nngp' not in get and
                  compute_cov and
                  k_td is not None):
                # Test-test covariance needs the NNGP kernel: must raise.
                with self.assertRaises(ValueError):
                  out_gp_inf = gp_inference(get=get, k_test_train=k_td,
                                            k_test_test=nngp_tt)
              else:
                out_gp_inf = gp_inference(get=get, k_test_train=k_td,
                                          k_test_test=nngp_tt)
                self.assertAllClose(out_ens, out_gp_inf)

      # Test NTKGP
      for get in [(), ('nngp',), ('ntk',), ('nngp', 'ntk'), ('ntk', 'nngp')]:
        ntkgp_get = get + ('ntkgp',)
        # The NTK itself is always needed to compute the NTKGP posterior.
        if 'ntk' not in get:
          get += ('ntk',)
        k_dd = kernel_fn(x_train, None, get)
        always_ntk = self._always_ntk(kernel_fn)
        always_ntk_k_dd = always_ntk(x_train, None, get)
        gp_inference = predict.gp_inference(k_dd, y_train, diag_reg=reg)
        always_ntk_gp_inference = predict.gp_inference(always_ntk_k_dd, y_train,
                                                       diag_reg=reg)
        gd_ensemble = predict.gradient_descent_mse_ensemble(kernel_fn,
                                                            x_train,
                                                            y_train,
                                                            diag_reg=reg)
        for x_test in [None, 'x_test']:
          x_test = None if x_test is None else random.normal(key, (8, 2))
          k_td = None if x_test is None else kernel_fn(x_test, x_train, get)
          always_ntk_k_td = None if x_test is None else always_ntk(x_test,
                                                                   x_train)
          for compute_cov in [True, False]:
            with self.subTest(kernel_fn_is_analytic=kernel_fn_is_analytic,
                              get=ntkgp_get,
                              x_test=x_test if x_test is None else 'x_test',
                              compute_cov=compute_cov):
              if compute_cov:
                k_tt = (True if x_test is None else
                        kernel_fn(x_test, None, get))
                always_ntk_tt = (True if x_test is None else
                                 kernel_fn(x_test, None, 'ntk'))
              else:
                k_tt = None
                always_ntk_tt = None
              if ('nngp' not in get and
                  'ntk' in ntkgp_get and
                  compute_cov and
                  k_td is not None):
                with self.assertRaises(ValueError):
                  out_gp_inf = gp_inference(get=ntkgp_get, k_test_train=k_td,
                                            k_test_test=k_tt)
              else:
                out_gp_inf = gp_inference(get=ntkgp_get, k_test_train=k_td,
                                          k_test_test=k_tt)
                out_ens = gd_ensemble(None, x_test, get, compute_cov)
                out_always_ntk_gp_inf = always_ntk_gp_inference(
                    get='nngp',
                    k_test_train=always_ntk_k_td,
                    k_test_test=always_ntk_tt)
                # Compare ntkgp predictions to nngp code, with hacked kernel
                for g in ntkgp_get:
                  self.assertAllClose(getattr(out_gp_inf, g),
                                      (getattr(out_ens, g) if g != 'ntkgp'
                                       else out_always_ntk_gp_inf))
  def testPredictOnCPU(self):
    """Checks batched-kernel predictions and CPU placement of outputs."""
    x_train = random.normal(random.PRNGKey(1), (4, 4, 4, 2))
    x_test = random.normal(random.PRNGKey(1), (8, 4, 4, 2))

    y_train = random.uniform(random.PRNGKey(1), (4, 2))

    _, _, kernel_fn = stax.serial(
        stax.Conv(1, (3, 3)), stax.Relu(), stax.Flatten(), stax.Dense(1))

    for store_on_device in [False, True]:
      for device_count in [0, 1]:
        for get in ['ntk', 'nngp', ('nngp', 'ntk'), ('ntk', 'nngp')]:
          for x in [None, 'x_test']:
            with self.subTest(
                store_on_device=store_on_device,
                device_count=device_count,
                get=get,
                x=x):
              kernel_fn_batched = nt.batch(kernel_fn, 2, device_count,
                                           store_on_device)
              predictor = predict.gradient_descent_mse_ensemble(
                  kernel_fn_batched, x_train, y_train)

              x = x if x is None else x_test

              predict_none = predictor(None, x, get, compute_cov=True)
              predict_inf = predictor(np.inf, x, get, compute_cov=True)
              self.assertAllClose(predict_none, predict_inf)

              if x is not None:
                # Outputs should be on CPU iff kernels were not stored on
                # device (or the default backend is already CPU).
                on_cpu = not store_on_device or jax.default_backend() == 'cpu'
                def is_on_cpu(x):
                  return jax.tree_util.tree_all(
                      jax.tree_map(
                          lambda x: 'cpu' in str(x.device_buffer.device()
                                                 ).lower(),
                          x))
                self.assertEqual(on_cpu, is_on_cpu(predict_inf))
                self.assertEqual(on_cpu, is_on_cpu(predict_none))
  def testPredictND(self):
    """Checks output shapes of predictions under generic `trace_axes`."""
    n_chan = 6
    key = random.PRNGKey(1)
    im_shape = (5, 4, 3)
    n_train = 2
    n_test = 2
    x_train = random.normal(key, (n_train,) + im_shape)
    y_train = random.uniform(key, (n_train, 3, 2, n_chan))
    init_fn, apply_fn, _ = stax.Conv(n_chan, (3, 2), (1, 2))
    _, params = init_fn(key, x_train.shape)
    fx_train_0 = apply_fn(params, x_train)

    for trace_axes in [(),
                       (-1,),
                       (-2,),
                       (-3,),
                       (0, 1),
                       (2, 3),
                       (2,),
                       (1, 3),
                       (0, -1),
                       (0, 0, -3),
                       (0, 1, 2, 3),
                       (0, 1, -1, 2)]:
      for ts in [None, np.arange(6).reshape((2, 3))]:
        for x in [None, 'x_test']:
          with self.subTest(trace_axes=trace_axes, ts=ts, x=x):
            # Expected output shapes: time axes prepended to `y`'s shape.
            t_shape = ts.shape if ts is not None else ()
            y_test_shape = t_shape + (n_test,) + y_train.shape[1:]
            y_train_shape = t_shape + y_train.shape
            x = x if x is None else random.normal(key, (n_test,) + im_shape)
            fx_test_0 = None if x is None else apply_fn(params, x)

            kernel_fn = nt.empirical_kernel_fn(apply_fn, trace_axes=trace_axes)
            kernel_fn = jit(kernel_fn, static_argnames='get')
            ntk_train_train = kernel_fn(x_train, None, 'ntk', params)
            if x is not None:
              ntk_test_train = kernel_fn(x, x_train, 'ntk', params)

            loss = lambda x, y: 0.5 * np.mean(x - y)**2
            predict_fn_mse = predict.gradient_descent_mse(ntk_train_train,
                                                          y_train,
                                                          trace_axes=trace_axes)

            predict_fn_mse_ensemble = predict.gradient_descent_mse_ensemble(
                kernel_fn, x_train, y_train, trace_axes=trace_axes,
                params=params
            )

            if x is None:
              p_train_mse = predict_fn_mse(ts, fx_train_0)
            else:
              p_train_mse, p_test_mse = predict_fn_mse(
                  ts, fx_train_0, fx_test_0, ntk_test_train)
              self.assertAllClose(y_test_shape, p_test_mse.shape)
            self.assertAllClose(y_train_shape, p_train_mse.shape)  # pytype: disable=attribute-error  # jax-ndarray

            p_nngp_mse_ens, p_ntk_mse_ens = predict_fn_mse_ensemble(
                ts, x, ('nngp', 'ntk'), compute_cov=True)
            ref_shape = y_train_shape if x is None else y_test_shape
            self.assertAllClose(ref_shape, p_ntk_mse_ens.mean.shape)
            self.assertAllClose(ref_shape, p_nngp_mse_ens.mean.shape)

            # `gradient_descent` (generic loss) only supports concrete times.
            if ts is not None:
              predict_fn = predict.gradient_descent(
                  loss, ntk_train_train, y_train, trace_axes=trace_axes)

              if x is None:
                p_train = predict_fn(ts, fx_train_0)
              else:
                p_train, p_test = predict_fn(
                    ts, fx_train_0, fx_test_0, ntk_test_train)
                self.assertAllClose(y_test_shape, p_test.shape)
              self.assertAllClose(y_train_shape, p_train.shape)
  @test_utils.product(
      train_size=TRAIN_SIZES,
      input_shape=INPUT_SHAPES,
      network=NETWORK,
      out_logits=OUTPUT_LOGITS,
      kernel_type=list(KERNELS.keys()),
      lr_factor=[0.5, 1., 3.],
      momentum=[0., 0.1, 0.5, 0.9]
  )
  def testMaxLearningRate(
      self,
      train_size,
      input_shape,
      network,
      out_logits,
      kernel_type,
      lr_factor,
      momentum
  ):
    """Checks that `predict.max_learning_rate` separates converge/diverge.

    Trains with momentum SGD at a multiple `lr_factor` of the predicted
    maximal learning rate: below it the loss must drop sharply, at it the
    loss must still decay, and well above it the loss must blow up.
    """
    key = random.PRNGKey(0)

    key, split = random.split(key)
    if len(input_shape) == 1:
      train_shape = (train_size * 5, input_shape[0] * 10)
    else:
      train_shape = (16, 8, 8, 3)
    x_train = random.normal(split, train_shape)

    key, split = random.split(key)
    y_train = np.array(
        random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)

    # Regress to an MSE loss.
    loss = lambda params, x: 0.5 * np.mean((f(params, x) - y_train) ** 2)
    grad_loss = jit(grad(loss))

    def get_loss(opt_state):
      return loss(get_params(opt_state), x_train)

    steps = 30

    fn_and_kernel = KERNELS[kernel_type]
    params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
    g_dd = ntk(x_train, None, 'ntk')

    step_size = predict.max_learning_rate(
        g_dd, y_train_size=y_train.size, momentum=momentum) * lr_factor
    opt_init, opt_update, get_params = optimizers.momentum(step_size,
                                                           mass=momentum)

    opt_state = opt_init(params)
    init_loss = get_loss(opt_state)

    for i in range(steps):
      params = get_params(opt_state)
      opt_state = opt_update(i, grad_loss(params, x_train), opt_state)

    trained_loss = get_loss(opt_state)
    loss_ratio = trained_loss / (init_loss + 1e-12)
    if lr_factor < 1.:
      self.assertLess(loss_ratio, 0.1)
    elif lr_factor == 1:
      # At the threshold, the loss decays slowly
      self.assertLess(loss_ratio, 1.)
    if lr_factor > 2.:
      # NaN counts as divergence, so only check the ratio when it is finite.
      if not math.isnan(loss_ratio):
        self.assertGreater(loss_ratio, 10.)
class PredictKwargsTest(test_utils.NeuralTangentsTestCase):
  """Checks `predict` with kernel functions requiring keyword arguments.

  Uses layers whose kernels need extra per-call inputs (`pattern` for
  `stax.Aggregate`, `rng` for `stax.Dropout`) across analytic, Monte Carlo
  and empirical kernel functions, with and without batching.
  """

  @test_utils.product(
      do_batch=[True, False],
      mode=['analytic', 'mc', 'empirical']
  )
  def test_kwargs(self, do_batch, mode):
    rng = random.PRNGKey(1)
    x_train = random.normal(rng, (8, 7, 10))
    x_test = random.normal(rng, (4, 7, 10))
    y_train = random.normal(rng, (8, 1))

    rng_train, rng_test = random.split(rng, 2)

    pattern_train = random.normal(rng, (8, 7, 7))
    pattern_test = random.normal(rng, (4, 7, 7))

    diag_reg = 1e-4

    # Looser tolerances and smaller width on TPU (lower precision).
    if jax.default_backend() == 'tpu':
      atol = 3e-3
      rtol = 4e-2
      width = 256
    else:
      atol = 5e-4
      rtol = 1e-2
      width = 64

    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(width, W_std=2**0.5),
        stax.Relu(),
        stax.Dropout(rate=0.7),
        stax.Aggregate(),
        stax.GlobalAvgPool(),
        stax.Dense(width)
    )

    # Kernel kwargs for train-train / test-train / test-test input pairs.
    kw_dd = dict(pattern=(pattern_train, pattern_train))
    kw_td = dict(pattern=(pattern_test, pattern_train))
    kw_tt = dict(pattern=(pattern_test, pattern_test))

    if mode == 'mc':
      kernel_fn = nt.monte_carlo_kernel_fn(init_fn, apply_fn, rng, 2,
                                           batch_size=2 if do_batch else 0)

    elif mode == 'empirical':
      kernel_fn = nt.empirical_kernel_fn(apply_fn)
      if do_batch:
        raise absltest.SkipTest('Batching of empirical kernel is not '
                                'implemented with keyword arguments.')
      # Empirical kernels additionally need `params` and a `get` argument.
      for kw in (kw_dd, kw_td, kw_tt):
        kw.update(dict(params=init_fn(rng, x_train.shape)[1],
                       get=('nngp', 'ntk')))

      kw_dd.update(dict(rng=(rng_train, None)))
      kw_td.update(dict(rng=(rng_test, rng_train)))
      kw_tt.update(dict(rng=(rng_test, None)))

    elif mode == 'analytic':
      if do_batch:
        kernel_fn = nt.batch(kernel_fn, batch_size=2)

    else:
      raise ValueError(mode)

    k_dd = kernel_fn(x_train, None, **kw_dd)
    k_td = kernel_fn(x_test, x_train, **kw_td)
    k_tt = kernel_fn(x_test, None, **kw_tt)

    # Infinite time NNGP/NTK.
    predict_fn_gp = predict.gp_inference(
        k_dd,
        y_train,
        diag_reg=diag_reg
    )
    out_gp = predict_fn_gp(k_test_train=k_td, k_test_test=k_tt.nngp)

    if mode == 'empirical':
      # The ensemble predictor passes kwargs through to the kernel function,
      # which must not receive `get` again.
      for kw in (kw_dd, kw_td, kw_tt):
        kw.pop('get')

    predict_fn_ensemble = predict.gradient_descent_mse_ensemble(
        kernel_fn,
        x_train,
        y_train,
        diag_reg=diag_reg,
        **kw_dd
    )
    out_ensemble = predict_fn_ensemble(x_test=x_test, compute_cov=True, **kw_tt)
    self.assertAllClose(out_gp, out_ensemble)

    # Finite time NTK test.
    predict_fn_mse = predict.gradient_descent_mse(k_dd.ntk, y_train)
    out_mse = predict_fn_mse(t=1.,
                             fx_train_0=None,
                             fx_test_0=0.,
                             k_test_train=k_td.ntk)
    out_ensemble = predict_fn_ensemble(t=1.,
                                       get='ntk',
                                       x_test=x_test,
                                       compute_cov=False,
                                       **kw_tt)
    self.assertAllClose(out_mse, out_ensemble, atol=atol, rtol=rtol)

    # Finite time NTK train.
    out_mse = predict_fn_mse(t=0.5,
                             fx_train_0=0.,
                             fx_test_0=None,
                             k_test_train=k_td.ntk)
    out_ensemble = predict_fn_ensemble(t=0.5,
                                       get='ntk',
                                       x_test=None,
                                       compute_cov=False,
                                       **kw_dd)
    self.assertAllClose(out_mse, out_ensemble, atol=atol, rtol=rtol)

    # Finite time NNGP test.
    predict_fn_mse = predict.gradient_descent_mse(k_dd.nngp, y_train)
    out_mse = predict_fn_mse(t=1.,
                             fx_train_0=None,
                             fx_test_0=0.,
                             k_test_train=k_td.nngp)
    out_ensemble = predict_fn_ensemble(t=1.,
                                       get='nngp',
                                       x_test=x_test,
                                       compute_cov=False,
                                       **kw_tt)
    self.assertAllClose(out_mse, out_ensemble, atol=atol, rtol=rtol)

    # Finite time NNGP train.
    out_mse = predict_fn_mse(t=5.,
                             fx_train_0=0.,
                             fx_test_0=None,
                             k_test_train=k_td.nngp)
    out_ensemble = predict_fn_ensemble(t=5.,
                                       get='nngp',
                                       x_test=None,
                                       compute_cov=False,
                                       **kw_dd)
    self.assertAllClose(out_mse, out_ensemble, atol=atol, rtol=rtol)
class IsOnCpuTest(test_utils.NeuralTangentsTestCase):
  """Tests the `_is_on_cpu` device-placement helper."""

  def test_is_on_cpu(self):
    dtypes = [np.float16, np.float32]
    # Only test float64 if 64-bit mode is enabled (x64 flag).
    float64 = jax.dtypes.canonicalize_dtype(np.float64)
    if float64 != np.float32:
      dtypes += [float64]

    for dtype in dtypes:
      with self.subTest(dtype=dtype):
        def x():
          return random.normal(random.PRNGKey(1), (2, 3), dtype)

        def x_cpu():
          return jax.device_get(random.normal(random.PRNGKey(1), (2, 3), dtype))

        x_jit = jit(x)
        # x_cpu_jit = jit(x_cpu)
        x_cpu_jit_cpu = jit(x_cpu, backend='cpu')

        self.assertTrue(_is_on_cpu(x_cpu()))
        # TODO(mattjj): re-enable this when device_put under jit works
        # self.assertTrue(predict._is_on_cpu(x_cpu_jit()))
        self.assertTrue(_is_on_cpu(x_cpu_jit_cpu()))

        if jax.default_backend() == 'cpu':
          self.assertTrue(_is_on_cpu(x()))
          self.assertTrue(_is_on_cpu(x_jit()))
        else:
          self.assertFalse(_is_on_cpu(x()))
          self.assertFalse(_is_on_cpu(x_jit()))
# Run all test cases in this file via absl's test runner.
if __name__ == '__main__':
  absltest.main()
| 44,227 | 36.197645 | 115 | py |
neural-tangents | neural-tangents-main/tests/empirical_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/empirical.py`."""
from functools import partial
import logging
import operator
from typing import Any, Callable, Sequence, Tuple, Optional, Dict, List
from absl.testing import absltest
from flax import linen as nn
import jax
from jax import jacobian, lax, remat
from jax import jit, tree_map
from jax import random
from jax.config import config
import jax.numpy as np
from jax.tree_util import tree_reduce
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src.utils import utils
from tests import test_utils
import numpy as onp
config.parse_flags_with_absl()
# Fail loudly on silent rank promotion in `jax.numpy` operations.
config.update('jax_numpy_rank_promotion', 'raise')

# Shapes of square matrices used by the linearization/Taylor tests.
TAYLOR_MATRIX_SHAPES = [(3, 3), (4, 4)]
# Number of random points at which each expansion is checked.
TAYLOR_RANDOM_SAMPLES = 10

# Architecture identifiers used to parameterize tests.
FLAT = 'FLAT'
POOLING = 'POOLING'
CONV = 'CONV'

# Train/test input shapes paired entry-wise with `NETWORK` topologies below.
TRAIN_SHAPES = [(4, 4), (4, 8), (8, 8), (6, 4, 4, 3), (4, 4, 4, 3),
                (4, 4, 4, 3)]
TEST_SHAPES = [(2, 4), (6, 8), (16, 8), (2, 4, 4, 3), (2, 4, 4, 3),
               (2, 4, 4, 3)]
NETWORK = [FLAT, FLAT, FLAT, FLAT, POOLING, CONV]
OUTPUT_LOGITS = [1, 2, 3]

CONVOLUTION_CHANNELS = 2
test_utils.update_test_tolerance()
def _build_network(input_shape, network, out_logits):
  """Constructs a small `stax` test network for the given topology.

  Args:
    input_shape: per-example input shape; rank 1 (vectors) or 3 (images).
    network: one of `FLAT`, `POOLING`, `CONV`.
    out_logits: output width of the final dense layer (where present).

  Returns:
    An `(init_fn, apply_fn, kernel_fn)` `stax` triple.

  Raises:
    ValueError: on an unknown `network` or unsupported input rank.
  """
  rank = len(input_shape)
  if rank == 1:
    # Vector inputs only support the fully-connected topology.
    assert network == FLAT
    return stax.Dense(out_logits, W_std=2.0, b_std=0.5)
  if rank != 3:
    raise ValueError('Expected flat or image test input.')
  if network == POOLING:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
        stax.GlobalAvgPool(),
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  if network == CONV:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (1, 2), W_std=1.5, b_std=0.1),
        stax.Relu(),
        stax.Conv(CONVOLUTION_CHANNELS, (3, 2), W_std=2.0, b_std=0.05))
  if network == FLAT:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
        stax.Flatten(),
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  raise ValueError('Unexpected network type found: {}'.format(network))
def _kernel_fns(key,
                input_shape,
                network,
                out_logits,
                diagonal_axes,
                trace_axes,
                vmap_axes=None):
  """Builds empirical NNGP and NTK kernel functions with bound parameters.

  Initializes the test network for `(input_shape, network, out_logits)` and
  returns `(nngp_kernel_fn, ntk_fns)`, where `ntk_fns` maps each
  `nt.NtkImplementation` to a jitted NTK function with `params` pre-bound.
  """
  init_fn, f, _ = _build_network(input_shape, network, out_logits)
  _, params = init_fn(key, (-1,) + input_shape)
  kwargs = dict(
      f=f,
      trace_axes=trace_axes,
      diagonal_axes=diagonal_axes,
  )
  # One NTK function per contraction implementation, all sharing `params`.
  ntk_fns = {
      i: partial(jit(nt.empirical_ntk_fn(
          **kwargs,
          vmap_axes=vmap_axes,
          implementation=i)),
                 params=params)
      for i in nt.NtkImplementation
  }
  nngp_kernel_fn = partial(jit(nt.empirical_nngp_fn(**kwargs)),
                           params=params)
  return nngp_kernel_fn, ntk_fns
# One empirical kernel-function factory per output width, keyed by name.
KERNELS = {
    'empirical_logits_{}'.format(o): partial(_kernel_fns, out_logits=o)
    for o in OUTPUT_LOGITS
}
class EmpiricalTest(test_utils.NeuralTangentsTestCase):
# We use a three layer deep linear network for testing.
@classmethod
def _f(cls, x, params, do_alter, do_shift_x=True):
w1, w2, b = params
if do_alter:
b *= 2.
w1 += 5.
w2 /= 0.9
if do_shift_x:
x = x * 2 + 1.
return ({'list': [
{
'quadratic': 0.5 * np.dot(np.dot(x.T, w1), x) + np.dot(w2, x) + b,
'linear': np.dot(w1, x)
},
w2
]},)
  @classmethod
  def _f_lin_exact(cls, x0, x, params, do_alter, do_shift_x=True):
    """Closed-form first-order Taylor expansion of `_f` around `x0`."""
    w1, w2, b = params
    f0 = EmpiricalTest._f(x0, params, do_alter, do_shift_x)
    if do_shift_x:
      x0 = x0 * 2 + 1.
      x = x * 2 + 1.
    dx = x - x0
    if do_alter:
      b *= 2.
      w1 += 5.
      w2 /= 0.9
    return tree_map(
        operator.add,
        f0,
        ({'list': [
            {
                # Gradient of the quadratic form at x0 is x0^T w1 + w2
                # (w1 is symmetrized in `_get_init_data`).
                'quadratic': np.dot(np.dot(x0.T, w1) + w2, dx),
                'linear': np.dot(w1, dx)
            },
            0.
        ]},)
    )
@classmethod
def _get_init_data(cls, shape):
key = random.PRNGKey(0)
key, s1, s2, s3, = random.split(key, 4)
w1 = random.normal(s1, shape)
w1 = 0.5 * (w1 + w1.T)
w2 = random.normal(s2, shape)
b = random.normal(s3, (1,) * (len(shape) - 1) + (shape[-1],))
params = (w1, w2, b)
key, split = random.split(key)
x0 = random.normal(split, (shape[-1], 1))
return key, params, x0
  @test_utils.product(
      shape=TAYLOR_MATRIX_SHAPES
  )
  def testLinearization(self, shape):
    """Checks `nt.linearize` against the closed-form linearization of `_f`."""
    key, params, x0 = self._get_init_data(shape)

    f_lin = nt.linearize(EmpiricalTest._f, x0)

    for _ in range(TAYLOR_RANDOM_SAMPLES):
      for do_alter in [True, False]:
        for do_shift_x in [True, False]:
          key, split = random.split(key)
          x = random.normal(split, (shape[-1], 1))
          self.assertAllClose(
              EmpiricalTest._f_lin_exact(
                  x0, x, params, do_alter, do_shift_x=do_shift_x),
              f_lin(x, params, do_alter, do_shift_x=do_shift_x))
  @test_utils.product(
      shape=TAYLOR_MATRIX_SHAPES
  )
  def testTaylorExpansion(self, shape):
    """Checks `nt.taylor_expand` at orders 1 and 2 against closed forms."""

    def f_2_exact(x0, x, params, do_alter, do_shift_x=True):
      # Exact second-order expansion: first order plus 0.5 * dx^T w1 dx.
      w1, w2, b = params
      f_lin = EmpiricalTest._f_lin_exact(x0, x, params, do_alter, do_shift_x)
      if do_shift_x:
        x0 = x0 * 2 + 1.
        x = x * 2 + 1.
      if do_alter:
        b *= 2.
        w1 += 5.
        w2 /= 0.9
      dx = x - x0
      return tree_map(
          operator.add,
          f_lin,
          ({'list': [
              {
                  'quadratic': 0.5 * np.dot(np.dot(dx.T, w1), dx),
                  'linear': 0.
              },
              0.
          ]},)
      )

    key, params, x0 = self._get_init_data(shape)

    f_lin = nt.taylor_expand(EmpiricalTest._f, x0, 1)
    f_2 = nt.taylor_expand(EmpiricalTest._f, x0, 2)

    for _ in range(TAYLOR_RANDOM_SAMPLES):
      for do_alter in [True, False]:
        for do_shift_x in [True, False]:
          key, split = random.split(key)
          x = random.normal(split, (shape[-1], 1))
          self.assertAllClose(
              EmpiricalTest._f_lin_exact(x0, x, params, do_alter,
                                         do_shift_x=do_shift_x),
              f_lin(x, params, do_alter, do_shift_x=do_shift_x))
          self.assertAllClose(f_2_exact(x0, x, params, do_alter,
                                        do_shift_x=do_shift_x),
                              f_2(x, params, do_alter, do_shift_x=do_shift_x))
def _compare_kernels(self, x1, x2, ntk_fns, ntk_fns_vmapped, nngp_fn):
nngp = nngp_fn(x1, x2)
ntks = {i: ntk_fns[i](x1, x2) for i in ntk_fns}
ntks_vmapped = {i: ntk_fns_vmapped[i](x1, x2) for i in ntk_fns_vmapped}
ntk_ref = ntks[nt.NtkImplementation.JACOBIAN_CONTRACTION]
tree_map(lambda x, y: self.assertEqual(x.shape, y.shape), nngp, ntk_ref)
for i, ntk in ntks.items():
self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} impl. fails.')
for i, ntk in ntks_vmapped.items():
self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} vmapped impl. fails.')
  @test_utils.product(
      train_test_network=list(zip(TRAIN_SHAPES, TEST_SHAPES, NETWORK)),
      kernel_type=list(KERNELS.keys())
  )
  def testNTKAgainstDirect(self, train_test_network, kernel_type):
    """Checks all NTK implementations (incl. vmapped) against the reference."""
    kernel_fn = KERNELS[kernel_type]
    train_shape, test_shape, network = train_test_network

    key = random.PRNGKey(0)
    key, self_split, other_split = random.split(key, 3)
    x1 = random.normal(self_split, train_shape)
    x2 = random.normal(other_split, test_shape)

    nngp_fn, ntk_fns = kernel_fn(
        key,
        train_shape[1:],
        network,
        diagonal_axes=(),
        trace_axes=()
    )

    _, ntk_fns_vmapped = kernel_fn(
        key,
        train_shape[1:],
        network,
        diagonal_axes=(),
        trace_axes=(),
        vmap_axes=0
    )

    # Both the self-kernel (x2=None) and the cross-kernel are checked.
    self._compare_kernels(x1, None, ntk_fns, ntk_fns_vmapped, nngp_fn)
    self._compare_kernels(x1, x2, ntk_fns, ntk_fns_vmapped, nngp_fn)
  @test_utils.product(
      diagonal_axes=[
          (),
          (0,),
          (0, 1),
          (0, 1, 2),
          (0, 1, 2, 3),
          (-1,),
          (-2,),
          (0, -1),
          (1, -2),
          (2, 3),
          (3, 0, 2)
      ],
      trace_axes=[
          (),
          (0,),
          (0, 1),
          (-1,),
          (1,),
          (0, -1),
          (-1, -2),
          (0, 1, 2, 3),
          (3, 1, 2, 0),
          (1, 2, 3),
          (-3, -2),
          (-3, -1),
          (-2, -4),
          (2, 0, -1)
      ]
  )
  def testAxes(self, diagonal_axes, trace_axes):
    """Checks empirical kernels across trace/diagonal axis combinations."""
    key = random.PRNGKey(0)
    key, self_split, other_split = random.split(key, 3)
    x1 = random.normal(self_split, (4, 5, 6, 3))
    x2 = random.normal(other_split, (2, 5, 6, 3))

    # Normalize negative axis indices for the overlap/batch-axis checks.
    _diagonal_axes = tuple(d % x1.ndim for d in diagonal_axes)
    _trace_axes = tuple(t % x1.ndim for t in trace_axes)

    if any(d == c for d in _diagonal_axes for c in _trace_axes):
      raise absltest.SkipTest(
          'diagonal axes must be different from channel axes.')

    get_kernel_fns = KERNELS['empirical_logits_3']
    kwargs = dict(
        key=key,
        input_shape=(5, 6, 3),
        network=CONV,
        diagonal_axes=diagonal_axes,
        trace_axes=trace_axes
    )

    nngp_fn, ntk_fns = get_kernel_fns(**kwargs)
    _, ntk_fns_vmapped = get_kernel_fns(**kwargs, vmap_axes=0)

    self._compare_kernels(x1, None, ntk_fns, ntk_fns_vmapped, nngp_fn)
    # The batch axis (0) cannot be traced/diagonal when x1 and x2 differ.
    if 0 not in _trace_axes and 0 not in _diagonal_axes:
      self._compare_kernels(x1, x2, ntk_fns, ntk_fns_vmapped, nngp_fn)
@test_utils.product(
    same_inputs=[True, False]
)
def test_parallel_in_out(self, same_inputs):
  """Tests empirical kernels of a network with tuple inputs and outputs."""
  rng = random.PRNGKey(0)
  input_key1, input_key2, net_key = random.split(rng, 3)

  # Split 21 features into chunks of 10 / 11 columns for the two branches.
  x1_1, x1_2 = np.split(random.normal(input_key1, (3, 21)), (10,), axis=1)
  x2_1, x2_2 = np.split(random.normal(input_key2, (4, 21)), (10,), axis=1)

  x1 = (x1_1, x1_2)
  x2 = None if same_inputs else (x2_1, x2_2)

  def layer(width):
    # Two parallel dense branches of slightly different widths.
    return stax.parallel(stax.Dense(width), stax.Dense(width + 1))

  init_fn, apply_fn, _ = stax.serial(layer(1024), layer(1))
  _, params = init_fn(net_key, (x1_1.shape, x1_2.shape))

  ntk_fns = {
      i: jit(partial(nt.empirical_ntk_fn(apply_fn, implementation=i),
                     params=params))
      for i in nt.NtkImplementation
  }
  ntk_fns_vmapped = {
      i: jit(partial(nt.empirical_ntk_fn(apply_fn,
                                         implementation=i,
                                         vmap_axes=(0, 0)),
                     params=params))
      for i in nt.NtkImplementation
  }
  nngp_fn = jit(partial(nt.empirical_nngp_fn(apply_fn), params=params))

  nngp = nngp_fn(x1, x2)
  n_cols = 3 if same_inputs else 4
  self.assertEqual(len(nngp), 2)
  self.assertEqual(nngp[0].shape, (3, n_cols))
  self.assertEqual(nngp[1].shape, (3, n_cols))

  self._compare_kernels(x1, x2, ntk_fns, ntk_fns_vmapped, nngp_fn)
@test_utils.product(
    same_inputs=[True, False]
)
def test_parallel_nested(self, same_inputs):
  """Tests empirical kernels of a network with nested list/tuple inputs."""
  rng = random.PRNGKey(0)
  input_key1, input_key2, net_key = random.split(rng, 3)

  # Split 33 features into three chunks of 10 / 11 / 12 columns.
  x1_1, x1_2, x1_3 = np.split(random.normal(input_key1, (3, 33)),
                              (10, 21), axis=1)
  x2_1, x2_2, x2_3 = np.split(random.normal(input_key2, (4, 33)),
                              (10, 21), axis=1)

  # Nested input structure: ([a, b], c).
  x1 = ([x1_1, x1_2], x1_3)
  x2 = ([x2_1, x2_2], x2_3) if not same_inputs else None

  def layer(N_out):
    # Nested parallel branches mirroring the nested input structure.
    return stax.parallel(stax.parallel(stax.Dense(N_out),
                                       stax.Dense(N_out + 1)),
                         stax.Dense(N_out + 2))

  init_fn, apply_fn, _ = stax.serial(layer(1024), layer(1))
  _, params = init_fn(net_key, tree_map(np.shape, x1))

  ntk_fns = {
      i: jit(nt.empirical_ntk_fn(apply_fn, implementation=i))
      for i in nt.NtkImplementation
  }
  # Same NTK functions, with batch axes matching the nested input structure.
  ntk_fns_vmapped = {
      i: jit(nt.empirical_ntk_fn(
          apply_fn,
          implementation=i,
          vmap_axes=([0, 0], 0)
      ))
      for i in nt.NtkImplementation
  }

  ntks = {i: ntk_fns[i](x1, x2, params) for i in ntk_fns}
  ntks_vmapped = {i: ntk_fns_vmapped[i](x1, x2, params)
                  for i in ntk_fns_vmapped}

  # Jacobian contraction serves as the reference implementation.
  ntk_ref = ntks[nt.NtkImplementation.JACOBIAN_CONTRACTION]
  for i, ntk in ntks.items():
    self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} impl. fails.')
  for i, ntk in ntks_vmapped.items():
    self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} vmapped impl. fails.')

  # NNGP output mirrors the nested output structure of the network.
  nngp_kernel_fn = jit(nt.empirical_nngp_fn(apply_fn))
  nngp = nngp_kernel_fn(x1, x2, params)

  self.assertEqual(len(nngp), 2)
  nngp_shape = (3, 3 if same_inputs else 4)
  self.assertEqual(nngp[0][0].shape, nngp_shape)
  self.assertEqual(nngp[0][1].shape, nngp_shape)
  self.assertEqual(nngp[1].shape, nngp_shape)
@test_utils.product(
    same_inputs=[True, False, None],
    in_dict=[True, False],
    out_dict=[True, False]
)
def test_vmap_axes(self, same_inputs, out_dict, in_dict):
  """Tests `vmap_axes` with pytree-structured inputs, outputs and kwargs."""
  n1, n2 = 3, 4
  c1, c2, c3 = 9, 5, 7
  h2, h3, w3 = 6, 8, 2

  def get_x(n, k):
    # Three arrays carrying the batch dimension `n` at different positions.
    k1, k2, k3 = random.split(k, 3)
    x1 = random.normal(k1, (n, c1))
    x2 = random.normal(k2, (h2, n, c2))
    x3 = random.normal(k3, (c3, w3, n, h3))
    x = [(x1, x2), x3]
    return x

  x1 = get_x(n1, random.PRNGKey(1))
  p1 = random.normal(random.PRNGKey(5), (n1, h2, h2))

  if same_inputs is None:
    x2 = None
    p2 = p1
  elif same_inputs is False:
    x2 = get_x(n2, random.PRNGKey(2))
    p2 = random.normal(random.PRNGKey(6), (n2, h2, h2))
  elif same_inputs is True:
    # `None` leaves inside the pytree signal "reuse the `x1` leaf".
    x2 = [(None, None), None]
    p2 = p1
  else:
    raise ValueError(same_inputs)

  init_fn, apply_fn_, _ = stax.serial(
      stax.parallel(
          stax.parallel(
              stax.serial(stax.Dense(4, 2., 0.1),
                          stax.Relu(),
                          stax.Dense(3, 1., 0.15)),  # 1
              stax.serial(stax.Conv(7, (2,), padding='SAME',
                                    dimension_numbers=('HNC', 'OIH', 'NHC')),
                          stax.Erf(),
                          stax.Aggregate(1, 0, -1),
                          stax.GlobalAvgPool(),
                          stax.Dense(3, 0.5, 0.2)),  # 2
          ),
          stax.serial(
              stax.Conv(5, (2, 3), padding='SAME',
                        dimension_numbers=('CWNH', 'IOHW', 'HWCN')),
              stax.Sin(),
          )  # 3
      ),
      stax.parallel(
          stax.FanInSum(),
          stax.Conv(2, (2, 1), dimension_numbers=('HWCN', 'OIHW', 'HNWC'))
      )
  )

  _, params = init_fn(random.PRNGKey(3), tree_map(np.shape, x1))

  # Batch axes of the (nested) inputs and of the two outputs.
  in_axes = [(0, 1), 2]
  out_axes = [-2, -3]

  # Helpers converting between the list/tuple structure understood by `stax`
  # and an equivalent dict-based pytree, to exercise generic pytree support.
  def nttree_to_pytree_in(x):
    if x is None:
      return x
    return {'x1_x2': (x[0][0], x[0][1]), 'x3': (None, x[1],)}

  def pytree_to_nttree_in(x):
    if x is None:
      return x
    return [(x['x1_x2'][0], x['x1_x2'][1]), x['x3'][1]]

  def nttree_to_pytree_out(x):
    if x is None:
      return None
    return {'outs': [{'out_1': x[0]}, (x[1], None)]}

  if in_dict:
    x1 = nttree_to_pytree_in(x1)
    x2 = nttree_to_pytree_in(x2)
    in_axes = nttree_to_pytree_in(in_axes)

  if out_dict:
    out_axes = nttree_to_pytree_out(out_axes)

  def apply_fn(params, x, **kwargs):
    # Wraps `apply_fn_` so its interface matches the chosen pytree layouts.
    if in_dict:
      x = pytree_to_nttree_in(x)
    out = apply_fn_(params, x, **kwargs)
    if out_dict:
      out = nttree_to_pytree_out(out)
    return out

  ntk_fns = {
      i: jit(nt.empirical_ntk_fn(apply_fn, implementation=i))
      for i in nt.NtkImplementation
  }
  # `pattern` is a batched keyword argument (batch axis 0) for `Aggregate`.
  ntk_fns_vmapped = {
      i: jit(nt.empirical_ntk_fn(
          apply_fn,
          implementation=i,
          vmap_axes=(in_axes, out_axes, dict(pattern=0))
      ))
      for i in nt.NtkImplementation
  }

  ntks = {i: ntk_fns[i](x1, x2, params, pattern=(p1, p2))
          for i in ntk_fns}
  ntks_vmapped = {i: ntk_fns_vmapped[i](x1, x2, params, pattern=(p1, p2))
                  for i in ntk_fns_vmapped}

  # Jacobian contraction serves as the reference implementation.
  ntk_ref = ntks[nt.NtkImplementation.JACOBIAN_CONTRACTION]
  for i, ntk in ntks.items():
    self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} impl. fails.')
  for i, ntk in ntks_vmapped.items():
    self.assertAllClose(ntk_ref, ntk, err_msg=f'{i} vmapped impl. fails.')
PyTree = Any
_functions: Dict[str, Callable[[PyTree, PyTree], PyTree]] = {
'[p[0]**(p[1] + x), p[2] * x + p[0]]':
lambda p, x: [np.abs(p[0])**(p[1] + x), p[2] * x + p[0]],
'[p[0]**(p[1] + x), p[2] / x + p[0]]':
lambda p, x: [np.abs(p[0])**(p[1] + x), p[2] / x + p[0]],
'[p[0] * p[1] * p[2] + (p[0] @ p[1].T) @ (p[2].T @ p[1]) @ x]':
lambda p, x: [p[0] * p[1] * p[2] + (p[0] @ p[1].T) @ (p[2].T @ p[1]) @ x],
'[p[0] / (p[1] * p[2]) + (p[0] @ p[1].T) @ (p[2].T @ p[1]) @ x]':
lambda p, x: [p[0] / (p[1] * p[2]) + (p[0] @ p[1].T) @ (p[2].T @ p[1]) @ x],
'x': lambda p, x: x,
'(x, x)': lambda p, x: (x, x),
'(x, (x, p))': lambda p, x: (x, (x, p)),
'[np.eye(1)]': lambda p, x: [np.eye(1)],
'x**2': lambda p, x: x**2,
'x @ x.T': lambda p, x: x @ x.T,
'p': lambda p, x: p,
'p[0] * p[1]': lambda p, x: p[0] * p[1],
'p[0] + p[1]': lambda p, x: p[0] + p[1],
'p[0] + p[1].T': lambda p, x: p[0] + p[1].T,
'p[0] + p[1] + p[2]': lambda p, x: p[0] + p[1] + p[2],
'p[0] + p[0].T': lambda p, x: p[0] + p[0].T,
'p[0] + p[0]': lambda p, x: p[0] + p[0],
'p[2] * x + p[2] / x': lambda p, x: p[2] * x + p[2] / x,
'-p[0] + 2 * p[1] - p[2] * 3': lambda p, x: -p[0] + p[1],
'-p[0] + 2 / p[1] - p[2] / 3': lambda p, x: -p[0] + 2 / p[1] - p[2] / 3,
'np.prod(p[2])': lambda p, x: np.prod(p[2]),
'sum(p)': lambda p, x: tree_reduce(lambda x, y: x + np.sum(y), 0.),
'prod(p)': lambda p, x: tree_reduce(lambda x, y: x * np.prod(y), 1.),
'sum(p)_typed': lambda p, x: tree_reduce(lambda x, y: x + np.sum(y), np.zeros((), x.dtype)),
'prod(p)_typed': lambda p, x: tree_reduce(lambda x, y: x * np.prod(y), np.ones((), x.dtype)),
'x + p[0]': lambda p, x: x + p[0],
'x - p[1]': lambda p, x: x - p[1],
'-p[0]': lambda p, x: -p[0],
'np.squeeze(np.expand_dims(p[0], 0))': lambda p, x: np.squeeze(np.expand_dims(p[0], 0)),
'p[1]**2': lambda p, x: p[1]**2,
'p[1] * p[1]': lambda p, x: p[1] * p[1],
'p[1] / p[1]': lambda p, x: p[1] / p[1],
'p[1] * p[0]': lambda p, x: p[1] * p[0],
'p[1] / p[0]': lambda p, x: p[1] / p[0],
'p[1] * np.expand_dims(np.arange(p[1].shape[1]))': lambda p, x: p[1] * np.expand_dims(np.arange(p[1].shape[1])),
'p[1] * np.expand_dims(p[0][0])': lambda p, x: p[1] * np.expand_dims(p[0][0]),
'p[1] / np.expand_dims(np.arange(p[1].shape[1]))': lambda p, x: p[1] / np.expand_dims(np.arange(p[1].shape[1])),
'p[1] / np.expand_dims(p[0][0])': lambda p, x: p[1] / np.expand_dims(p[0][0]),
'[p[0], p[1], p[0] / p[1], 2 * p[0], -p[1] + p[0]]': lambda p, x: [p[0], p[1], p[0] / p[1], 2 * p[0], -p[1] + p[0]],
'[np.sum(p[0], axis=0), np.sum(p[0], axis=1)]': lambda p, x: [np.sum(p[0], axis=0), np.sum(p[0], axis=1)],
'[np.sum(p[0], axis=0, keepdims=True), np.sum(p[0], axis=1, keepdims=True)]': lambda p, x: [np.sum(p[0], axis=0, keepdims=True), np.sum(p[0], axis=1, keepdims=True)],
'[p[0], np.sum(p[0], axis=0, keepdims=True), np.sum(p[0], axis=1, keepdims=True)]': lambda p, x: [p[0], np.sum(p[0], axis=0, keepdims=True), np.sum(p[0], axis=1, keepdims=True)],
'[p[0], p[0].T]': lambda p, x: [p[0], p[0].T],
'[p[0], p[0]]': lambda p, x: [p[0], p[0]],
'[p[0].reshape((-1,), p[0].reshape((-1,))]': lambda p, x: [p[0].reshape((-1,)), p[0].reshape((-1,))],
'[p[0].reshape((2, -1)), p[0].reshape((-1, 2))]': lambda p, x: [p[0].reshape((2, -1)), p[0].reshape((-1, 2))],
'[p[0].reshape((2, -1)), p[0].reshape((-1, 2)), p[0].T.reshape((2, -1)), p[0].T.reshape((-1, 2))]': lambda p, x: [p[0].reshape((2, -1)), p[0].reshape((-1, 2)), p[0].T.reshape((2, -1)), p[0].T.reshape((-1, 2))],
'[p[0], p[0].T, p[0].reshape((-1,))]': lambda p, x: [p[0], p[0].T, p[0].reshape((-1,))],
'[p[0], p[0].T, p[0].reshape((-1,)), p[0].reshape((-1, 1))': lambda p, x: [p[0], p[0].T, p[0].reshape((-1,)), p[0].reshape((-1, 1))],
'[p[0], p[0].T, p[0].reshape((-1,)), 2 * p[0].reshape((-1, 1)), -p[0].reshape((1, -1))': lambda p, x: [p[0], p[0].T, p[0].reshape((-1,)), 2 * p[0].reshape((-1, 1)), -p[0].reshape((1, -1))],
'p[0] @ p[0]': lambda p, x: p[0] @ p[0],
'p[0] @ p[1]': lambda p, x: p[0] @ p[1],
'p[0] @ p[1] @ p[2]': lambda p, x: p[0] @ p[1] @ p[2],
'p[0].T @ p[0]': lambda p, x: p[0].T @ p[0],
'p[1].T @ p[0]': lambda p, x: p[1].T @ p[0],
'p[2] @ p[0] @ p[1]': lambda p, x: p[2] @ p[0] @ p[1],
'(p[0] @ p[1], p[0])': lambda p, x: (p[0] @ p[1], p[0]),
'(p[0] @ p[1], p[1])': lambda p, x: (p[0] @ p[1], p[1]),
'(p[0] @ p[1], p[1].T)': lambda p, x: (p[0] @ p[1], p[1].T),
'(p[0] @ p[1], p[0].T)': lambda p, x: (p[0] @ p[1], p[0].T),
'np.sum(p[0])': lambda p, x: np.sum(p[0]),
'np.sum(p[0], axis=1, keepdims=True)': lambda p, x: np.sum(p[0], axis=1, keepdims=True),
'np.sum(p[1], axis=0, keepdims=False)': lambda p, x: np.sum(p[1], axis=0, keepdims=False),
'np.sum(p[0] @ p[0])': lambda p, x: np.sum(p[0] @ p[0]),
'np.sum(p[2] * p[1])': lambda p, x: np.sum(p[2] * p[1]),
'np.sum(p[1] * p[1])': lambda p, x: np.sum(p[1] * p[1]),
'np.sum(p[2] / p[1])': lambda p, x: np.sum(p[2] / p[1]),
'np.sum(p[1] / p[1])': lambda p, x: np.sum(p[1] / p[1]),
'np.zeros((2, 4))': lambda p, x: np.zeros((2, 4)),
'np.zeros((2, 4))_typed': lambda p, x: np.zeros((2, 4), x.dtype),
'np.ones((1, 2))': lambda p, x: np.ones((1, 2)),
'p[2]': lambda p, x: p[2],
'[p[1], p[0], p[2]]': lambda p, x: [p[1], p[0], p[2]],
'np.real(p[2])': lambda p, x: np.real(p[2]),
'np.real(x)': lambda p, x: np.real(x),
'np.imag(p[2])': lambda p, x: np.imag(p[2]),
'np.imag(x)': lambda p, x: np.imag(x),
'np.abs(np.real(p[2]) + np.imag(p[2])) @ np.imag(p[0])': lambda p, x: np.abs(np.real(p[2]) + np.imag(p[2])) @ np.imag(p[0]),
'[np.real(p[1]), np.imag(p[0]), np.abs(-p[2])],': lambda p, x: [np.real(p[1]), np.imag(p[0]), np.abs(-p[2])],
'lax.complex(p[0], p[1])': lambda p, x: lax.complex(p[0], p[1]),
'lax.conj(p[0])': lambda p, x: lax.conj(p[0]),
'lax.conj(p[0]) @ lax.conj(p[1])': lambda p, x: lax.conj(p[0]) @ lax.conj(p[1]),
'lax.complex(x, p[1]) * lax.complex(p[0], p[2])': lambda p, x: lax.complex(x, p[1]) * lax.complex(p[0], p[2]),
'long': lambda p, x: [
p[0] @ (p[1] + p[0] @ (p[1] / p[0])),
(p[2], x[1]),
x[1:2, :] * (x - 1.),
x[1] * p[2][:1, 0],
np.array(1., dtype=x.dtype)
],
'reshape': lambda p, x: [p[0].reshape((1, -1,)) @ p[1].reshape((-1, 1)),
p[2][:2, :3].reshape((3, 2))],
'p[0].reshape((-1,)).reshape((3, 3)).T': lambda p, x: p[0].reshape((-1,)).reshape((3, 3)).T,
'lax_reshape_all': lambda p, x: [
lax.reshape(p[0], (1, onp.prod(p[0].shape, dtype=int)), tuple(reversed(range(p[0].ndim)))),
lax.reshape(p[0], (onp.prod(p[0].shape, dtype=int), 1, 1)),
lax.reshape(p[1], (onp.prod(p[1].shape, dtype=int),), tuple(range(p[1].ndim))),
lax.reshape(p[1], (1, onp.prod(p[1].shape, dtype=int), 1), tuple(reversed(range(p[1].ndim)))),
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(range(p[2].ndim))),
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(reversed(range(p[2].ndim)))),
lax.reshape(p[2], utils.zip_flat(reversed(p[2].shape), [1] * p[2].ndim), tuple(range(p[2].ndim))),
lax.reshape(p[2], (1,) + tuple(reversed(p[2].shape)) + (1,), tuple(reversed(range(p[2].ndim)))),
lax.reshape(p[2], p[2].shape, tuple(reversed(range(p[2].ndim)))),
lax.reshape(p[2], p[2].shape, tuple(range(p[2].ndim))),
lax.reshape(p[2], p[2].shape + (1,), tuple(range(p[2].ndim))),
lax.reshape(p[2], (1, 1) + p[2].shape, tuple(range(p[2].ndim))),
lax.reshape(p[2], p[2].shape),
],
'lax_reshape_1_2': lambda p, x: [
lax.reshape(p[0], (1, onp.prod(p[0].shape, dtype=int)), tuple(reversed(range(p[0].ndim)))) * np.prod(p[0]),
lax.reshape(p[0], (onp.prod(p[0].shape, dtype=int), 1, 1)) * np.sum(p[0]),
],
'lax_reshape_3_4': lambda p, x: [
lax.reshape(p[1], (onp.prod(p[1].shape, dtype=int),), tuple(range(p[1].ndim))) + np.sum(p[1]),
lax.reshape(p[1], (1, onp.prod(p[1].shape, dtype=int), 1), tuple(reversed(range(p[1].ndim)))) - np.sum(p[1]),
],
'lax_reshape_12_13': lambda p, x: [
lax.reshape(p[2], (1, 1) + p[2].shape, tuple(range(p[2].ndim))) * np.sum(p[1]) - 3,
lax.reshape(p[2], p[2].shape) + np.sum(p[0]) + 1,
],
'lax_reshape_1_10_11': lambda p, x: [
lax.reshape(p[0], (1, onp.prod(p[0].shape, dtype=int)), tuple(reversed(range(p[0].ndim)))) + np.sum(p[2]),
lax.reshape(p[2], p[2].shape, tuple(range(p[2].ndim))) * np.sum(p[0]),
lax.reshape(p[2], p[2].shape + (1,), tuple(range(p[2].ndim))) + np.sum(p[1]) - 1,
],
'lax_reshape_4_5_6': lambda p, x: [
lax.reshape(p[1], (1, onp.prod(p[1].shape, dtype=int), 1), tuple(reversed(range(p[1].ndim)))),
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(range(p[2].ndim))),
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(reversed(range(p[2].ndim)))),
],
'lax_reshape_5_6': lambda p, x: [
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(range(p[2].ndim))),
lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(reversed(range(p[2].ndim)))),
],
'lax_reshape_1': lambda p, x: lax.reshape(p[0], (1, onp.prod(p[0].shape, dtype=int)), tuple(reversed(range(p[0].ndim)))),
'lax_reshape_2': lambda p, x: lax.reshape(p[0], (onp.prod(p[0].shape, dtype=int), 1, 1)),
'lax_reshape_3': lambda p, x: lax.reshape(p[1], (onp.prod(p[1].shape, dtype=int),), tuple(range(p[1].ndim))),
'lax_reshape_4': lambda p, x: lax.reshape(p[1], (1, onp.prod(p[1].shape, dtype=int), 1), tuple(reversed(range(p[1].ndim)))),
'lax_reshape_5': lambda p, x: lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(range(p[2].ndim))),
'lax_reshape_6': lambda p, x: lax.reshape(p[2], tuple(reversed(p[2].shape)), tuple(reversed(range(p[2].ndim)))),
'lax_reshape_7': lambda p, x: lax.reshape(p[2], utils.zip_flat(reversed(p[2].shape), [1] * p[2].ndim), tuple(range(p[2].ndim))),
'lax_reshape_8': lambda p, x: lax.reshape(p[2], (1,) + tuple(reversed(p[2].shape)) + (1,), tuple(reversed(range(p[2].ndim)))),
'lax_reshape_9': lambda p, x: lax.reshape(p[2], p[2].shape, tuple(reversed(range(p[2].ndim)))),
'lax_reshape_10': lambda p, x: lax.reshape(p[2], p[2].shape, tuple(range(p[2].ndim))),
'lax_reshape_11': lambda p, x: lax.reshape(p[2], p[2].shape + (1,), tuple(range(p[2].ndim))),
'lax_reshape_12': lambda p, x: lax.reshape(p[2], (1, 1) + p[2].shape, tuple(range(p[2].ndim))),
'lax_reshape_13': lambda p, x: lax.reshape(p[2], p[2].shape),
'rev': lambda p, x: (lax.rev(p[0], (0,)), lax.rev(p[1], (1,)), lax.rev(p[2], [0, 1])),
'rev2': lambda p, x: lax.rev(p[0], (0,)) * lax.rev(p[1], (1,)) - lax.rev(p[2], [0, 1])**2,
'np.squeeze(p[0]) * np.squeeze(p[1])': lambda p, x: np.squeeze(p[0]) * np.squeeze(p[1]),
'pad_1': lambda p, x: lax.pad(p[0], np.ones((), p[0].dtype), [(0, 0, 0), (0, 1, 2)]),
'pad_np_const': lambda p, x: np.pad(p[0], [(0, 0), (1, 2)]),
'pad_np_wrap': lambda p, x: np.pad(p[0], [(2, 3), (0, 0)], 'wrap'),
'pad_np_max': lambda p, x: np.pad(p[0], [(0, 0), (1, 0)], 'maximum'),
'pad_2': lambda p, x: lax.pad(p[1], np.ones((), p[1].dtype), [(0, 0, 0), (0, 0, 0)]),
'pad_3': lambda p, x: lax.pad(p[2], np.ones((), p[2].dtype), [(1, 0, 2), (0, 1, 0)]),
'pad_4': lambda p, x: lax.pad(p[0], np.ones((), p[0].dtype), [(0, 0, 0), (0, 1, 2)]) + lax.pad(p[0], np.ones((), p[0].dtype), [(0, 1, 2), (0, 0, 0)]).T,
'lax.concatenate([p[0], p[0]], 0)': lambda p, x: lax.concatenate([p[0], p[0]], 0),
'lax.concatenate([p[0], p[0]], 1)': lambda p, x: lax.concatenate([p[0], p[0]], 1),
'lax.concatenate([p[0], p[1]], 0)': lambda p, x: lax.concatenate([p[0], p[1]], 0),
'lax.concatenate([p[0], p[1]], 1)': lambda p, x: lax.concatenate([p[0], p[1]], 1),
'lax.concatenate([p[0], p[0].T], 0)': lambda p, x: lax.concatenate([p[0], p[0].T], 0),
'lax.concatenate([p[0], p[0].T], 1)': lambda p, x: lax.concatenate([p[0], p[0].T], 1),
'lax.concatenate([p[0], p[1], p[0].T], 1)': lambda p, x: lax.concatenate([p[0], p[1], p[0].T], 1),
'lax.concatenate([p[0], p[1], p[0].T], 0)': lambda p, x: lax.concatenate([p[0], p[1], p[0].T], 0),
'lax.concatenate(p, 0)': lambda p, x: lax.concatenate(p, 0),
'lax.concatenate(p, 1)': lambda p, x: lax.concatenate(p, 1),
'(lax.concatenate([p[0], x], 1) @ lax.concatenate([p[1], p[2]], 0))**2': lambda p, x: (lax.concatenate([p[0], x], 1) @ lax.concatenate([p[1], p[2]], 0))**2,
'np.transpose(np.stack(p), (0, 1, 2))': lambda p, x: np.transpose(np.stack(p), (0, 1, 2)),
'np.transpose(np.stack(p), (0, 2, 1))': lambda p, x: np.transpose(np.stack(p), (0, 2, 1)),
'np.transpose(np.stack(p), (1, 0, 2))': lambda p, x: np.transpose(np.stack(p), (1, 0, 2)),
'np.transpose(np.stack(p), (1, 2, 0))': lambda p, x: np.transpose(np.stack(p), (1, 2, 0)),
'np.transpose(np.stack(p), (2, 1, 0))': lambda p, x: np.transpose(np.stack(p), (2, 1, 0)),
'np.transpose(np.stack(p), (2, 0, 1))': lambda p, x: np.transpose(np.stack(p), (2, 0, 1)),
'transpose_3': lambda p, x: np.transpose(np.expand_dims(np.stack(p, 1), 0), (2, 0, 3, 1)),
'transpose_4': lambda p, x: np.transpose(np.expand_dims(np.stack(p, 1), 1), (0, 2, 1, 3)),
'transpose_5': lambda p, x: np.transpose(np.expand_dims(np.stack(p, 2), 2), (0, 1, 2, 3)),
'transpose_6': lambda p, x: np.transpose(np.expand_dims(np.stack(p, 2), 0), (1, 0, 3, 2)),
# pytype: disable=module-attr
'lax._reduce_window_sum_1': lambda p, x: lax._reduce_window_sum(p[0], (1, 2), (1, 1), [(0, 0), (0, 1)]),
'lax._reduce_window_sum_2': lambda p, x: lax._reduce_window_sum(p[0], (1, 1), (1, 1), [(0, 0), (0, 0)]),
'lax._reduce_window_sum_3': lambda p, x: lax._reduce_window_sum(p[0], (2, 1), (1, 2), [(0, 0), (0, 2)]),
'lax._reduce_window_sum_4': lambda p, x: lax._reduce_window_sum(p[0], (2, 2), (1, 1), [(2, 3), (0, 0)]),
'lax._reduce_window_sum_5': lambda p, x: lax._reduce_window_sum(p[0], (1, 1), (2, 1), [(0, 0), (1, 0)]),
# pytype: enable=module-attr
'dg1-l': lambda p, x: lax.dot_general(p[0], x, (((), ()), ((), ()))),
'dg2-l': lambda p, x: lax.dot_general(p[0], x, (((1,), (0,)), ((), ()))),
'dg3-l': lambda p, x: lax.dot_general(p[0], x, (((0,), (0,)), ((), ()))),
'dg4-l': lambda p, x: lax.dot_general(p[0], x, (((0, 1), (0, 1)), ((), ()))),
'dg5-l': lambda p, x: lax.dot_general(p[0], x, (((1,), (1,)), ((0,), (0,)))),
'dg6-l': lambda p, x: lax.dot_general(p[0], x, (((), ()), ((0, 1), (0, 1)))),
'dg7-l': lambda p, x: lax.dot_general(p[0], x, (((), ()), ((1,), (0,)))),
'dg8-l': lambda p, x: lax.dot_general(p[0], x, (((0,), (1,)), ((1,), (0,)))),
'dg1-r': lambda p, x: lax.dot_general(x, p[0], (((), ()), ((), ()))),
'dg2-r': lambda p, x: lax.dot_general(x, p[0], (((1,), (0,)), ((), ()))),
'dg3-r': lambda p, x: lax.dot_general(x, p[0], (((0,), (0,)), ((), ()))),
'dg4-r': lambda p, x: lax.dot_general(x, p[0], (((0, 1), (0, 1)), ((), ()))),
'dg5-r': lambda p, x: lax.dot_general(x, p[0], (((1,), (1,)), ((0,), (0,)))),
'dg6-r': lambda p, x: lax.dot_general(x, p[0], (((), ()), ((0, 1), (0, 1)))),
'dg7-r': lambda p, x: lax.dot_general(x, p[0], (((), ()), ((1,), (0,)))),
'dg8-r': lambda p, x: lax.dot_general(x, p[0], (((0,), (1,)), ((1,), (0,)))),
'dg1-p': lambda p, x: lax.dot_general(p[0], p[1], (((), ()), ((), ()))),
'dg2-p': lambda p, x: lax.dot_general(p[0], p[1], (((1,), (0,)), ((), ()))),
'dg3-p': lambda p, x: lax.dot_general(p[0], p[1], (((0,), (0,)), ((), ()))),
'dg4-p': lambda p, x: lax.dot_general(p[0], p[1], (((0, 1), (0, 1)), ((), ()))),
'dg5-p': lambda p, x: lax.dot_general(p[0], p[1], (((1,), (1,)), ((0,), (0,)))),
'dg6-p': lambda p, x: lax.dot_general(p[0], p[1], (((), ()), ((0, 1), (0, 1)))),
'dg7-p': lambda p, x: lax.dot_general(p[0], p[1], (((), ()), ((1,), (0,)))),
'dg8-p': lambda p, x: lax.dot_general(p[0], p[1], (((0,), (1,)), ((1,), (0,)))),
'p[1] * p[0][1, 0]': lambda p, x: p[1] * p[0][1, 0],
'p[1] / p[0][0, -1]': lambda p, x: p[1] / p[0][1, -1],
# TODO(romann): investigate full support for compiled loops.
'lax.map_1': lambda p, x: lax.map(lambda s: 2 * s, p[0]) * np.sum(p[1]),
'lax.map_2': lambda p, x: lax.map(lambda s: 2 * s + 1, p[0]) * np.sum(p[0]),
'lax.map_3': lambda p, x: np.sum(lax.map(lambda s: -s / 2., p[0])) * p[0],
'lax.map_4': lambda p, x: lax.map(lambda s: -s / 2., p[0]) * lax.map(lambda s: 2 * s, p[0]),
'lax.map_5': lambda p, x: (lax.map(lambda s: lax.map(lambda p: 2 * p, s) + 1., p[0]), p[1]),
'lax.map_6': lambda p, x: [lax.map(lambda s: lax.map(lambda p: 2 * p, s) + 1., p[0]), p[0]],
# TODO(romann): revisit if JAX figures out AD for out-of-bounds indexing.
# 'p[0][1, 0] * p[2].T': lambda p, x: p[0][1, 0] * p[2].T,
}
def _compare_ntks(
    self,
    do_jit,
    do_remat,
    f,
    p,
    x1,
    x2,
    _j_rules,
    _s_rules,
    _fwd,
    vmap_axes=None,
    allow_forward_pass_fail=False,
    rtol=None,
    atol=None,
):
  """Checks that all NTK implementations agree pairwise on `f`.

  Args:
    self: test case instance, used for assertions / skipping.
    do_jit: whether to `jit` the kernel functions.
    do_remat: whether to wrap `f` in `remat`.
    f: function `(params, x) -> output` whose NTK is computed.
    p: parameters of `f`.
    x1: first batch of inputs.
    x2: second batch of inputs, or `None` to reuse `x1`.
    _j_rules: whether to use structured Jacobian rules.
    _s_rules: whether to use structured derivative rules.
    _fwd: whether to use forward mode in FVPs (`None` to decide per-op).
    vmap_axes: batch axes forwarded to `nt.empirical_ntk_fn`.
    allow_forward_pass_fail: if `True`, skip the test when the forward /
      Jacobian pass of `f` itself fails (the failure then is unrelated to
      the NTK computation under test).
    rtol: relative tolerance for kernel comparison.
    atol: absolute tolerance for kernel comparison.
  """
  if do_remat:
    f = remat(f)

  # First verify the forward and Jacobian passes of `f` succeed at all, so
  # a later failure can be attributed to the NTK implementations.
  try:
    f1 = f(p, x1)
    jacobian(f)(p, x1)
    if x2 is not None:
      f2 = f(p, x2)
      jacobian(f)(p, x2)
  except Exception as e:
    logging.exception(e)
    if allow_forward_pass_fail:
      raise absltest.SkipTest('Forward/Jacobian pass fails!')
    else:
      raise e

  # One kernel function per implementation. `AUTO` is excluded since it
  # merely dispatches to one of the concrete implementations.
  k_fns = {
      i: nt.empirical_ntk_fn(
          f=f,
          trace_axes=(),
          implementation=i,
          vmap_axes=vmap_axes,
          _j_rules=_j_rules,
          _s_rules=_s_rules,
          _fwd=_fwd
      )
      for i in nt.NtkImplementation
      if i not in (nt.NtkImplementation.AUTO,)
  }

  if do_jit:
    for i in k_fns:
      k_fns[i] = jit(k_fns[i])

  kernels = {
      i: k_fns[i](x1, x2, p)
      for i in k_fns
  }

  # Compare every unordered pair of implementations against each other.
  kernels = list(enumerate(kernels.items()))
  for idx_1, (i_1, k_1) in kernels:
    for idx2_2, (i_2, k_2) in kernels[idx_1 + 1:]:
      msg = f'Mismatch between implementations {i_1} and {i_2}'
      self.assertAllClose(
          k_1,
          k_2,
          rtol=rtol,
          atol=atol,
          check_dtypes=False,  # TODO(romann): revisit.
          check_finite=False,
          err_msg=msg)
class StructuredDerivativesTest(test_utils.NeuralTangentsTestCase):
  """Tests NTK implementations on functions built from raw JAX primitives."""

  @test_utils.product(
      _j_rules=[
          True,
          False
      ],
      _s_rules=[
          True,
          False
      ],
      _fwd=[
          True,
          False,
          None,
      ],
      same_inputs=[
          # True,
          False
      ],
      shapes=[
          # [[p_i.shape for i in range(num_params)],
          #  [x1.shape, x2.shape]]
          [[(3, 3), (3, 3), (3, 3)],
           [(3, 3), (3, 3)]],

          [[(5, 1, 2), (2, 1, 3), (4, 3, 1)],
           [(2, 3), (3, 2)]],

          [[(2, 3), (3, 2, 1), (2, 3, 5)],
           [(2, 3), (3, 2)]],

          [[(2, 2), (2, 2), (2, 2)],
           [(3, 3), (3, 3)]],

          [[(3, 3), (3, 3), (3, 3)],
           [(3, 3), (2, 3)]],

          [[(3, 2), (2, 3), (3, 1)],
           [(1,), (1,)]],

          [[(3, 2), (2, 3), (3, 1)],
           [(2,), (1,)]],

          [[(2, 1), (2, 4), (4, 1)],
           [(2, 2), (2, 2)]],

          [[(2, 1), (2, 4), (4, 1)],
           [(1, 2), (2, 2)]],

          [[(5,), (1, 5), (5, 1)],
           [(5, 5), (5, 5)]],

          [[(5,), (1, 5), (5, 1)],
           [(4, 5), (5, 5)]],

          [[(1, 1), (0, 0), (0, 1)],
           [(1, 0), (1, 0)]],

          [[(1, 1), (0, 0), (0, 1)],
           [(2, 0), (2, 0)]],

          [[(1, 2), (2, 0), (3, 1)],
           [(1, 4), (1, 4)]],

          [[(1, 2), (2, 0), (3, 1)],
           [(1, 4), (2, 4)]],

          [[(3, 2), (2, 1), (3, 1)],
           [(1, 4), (1, 3)]],

          [[(3, 2), (2, 1), (3, 1)],
           [(1, 4), (2, 4)]],

          [[(), (2, 1), (3, 1)],
           [(1,), (2,)]],

          [[(1,), (1,), (1,)],
           [(2,), (2,)]],

          [[(0,), (0,), (0,)],
           [(0,), (0,)]],

          [[(), (), ()],
           [(), ()]],

          [[(), (), ()],
           [(2,), (1,)]],

          [[(2,), (), (1,)],
           [(0,), (2,)]],

          [[(2,), (0, 3), (1,)],
           [(3, 2), (2, 1)]]
      ],
      p_list=[
          True,
          # False
      ],
      x_list=[
          # True,
          False
      ],
      dtype=[
          np.float32,
          # np.float64,
          # np.float16,
      ],
      do_jit=[
          True,
          # False
      ],
      do_remat=[
          # TODO(romann): support remat
          # True,
          False
      ],
      f_name=list(_functions.keys())
  )
  def test_function(
      self,
      same_inputs,
      f_name,
      shapes,
      p_list,
      x_list,
      do_jit,
      do_remat,
      dtype,
      _j_rules,
      _s_rules,
      _fwd
  ):
    """Checks that all NTK implementations agree on `_functions[f_name]`."""
    if f_name == 'lax_reshape_all':
      # TODO(romann): investigate slow CPU execution.
      test_utils.skip_test('Skipping large non-structured reshapes on CPU.')

    if 'lax.map' in f_name and shapes[0][0] and shapes[0][0][0] == 0:
      # TODO(romann): fix.
      raise absltest.SkipTest('Zero-length scans not supported without JIT.')

    # Random parameters (one per shape in `shapes[0]`) and inputs.
    p = [random.normal(random.PRNGKey(i), s, dtype) for i, s in
         enumerate(shapes[0])]
    k1, k2 = random.split(random.PRNGKey(len(shapes)))
    x1 = random.normal(k1, shapes[1][0], dtype)
    x2 = None if same_inputs else random.normal(k2, shapes[1][1], dtype)

    if not p_list:
      p = p[0]

    if x_list:
      x1 = [x1]
      x2 = [x2]

    # Loosen tolerances for low-precision dtypes.
    if dtype == np.float16:
      atol = 0.1
      rtol = 0.01
    else:
      atol = None
      rtol = None

    _compare_ntks(
        self,
        do_jit=do_jit,
        do_remat=do_remat,
        f=_functions[f_name],
        p=p,
        x1=x1,
        x2=x2,
        atol=atol,
        rtol=rtol,
        allow_forward_pass_fail=True,
        _j_rules=_j_rules,
        _s_rules=_s_rules,
        _fwd=_fwd
    )
# FLAX examples forked from https://github.com/google/flax.
class _MLP(nn.Module):
  """A plain MLP: ReLU after every layer except the last."""
  features: Sequence[int]

  @nn.compact
  def __call__(self, x):
    *hidden_widths, out_width = self.features
    for width in hidden_widths:
      x = nn.relu(nn.Dense(width)(x))
    return nn.Dense(out_width)(x)
class _CNN(nn.Module):
  """Two grouped-conv stages followed by a small MLP classifier head."""
  features: int
  feature_group_counts: List[int]

  @nn.compact
  def __call__(self, x):
    # Two conv -> relu -> avg-pool stages, grouped per
    # `feature_group_counts[0]` / `[1]`.
    for group_count in self.feature_group_counts[:2]:
      x = nn.Conv(features=self.features, kernel_size=(3, 3),
                  feature_group_count=group_count)(x)
      x = nn.relu(x)
      x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
    x = x.reshape((x.shape[0], -1))  # flatten
    x = nn.relu(nn.Dense(features=128)(x))
    x = nn.Dense(features=10)(x)
    return nn.log_softmax(x)
class _AutoEncoder(nn.Module):
  """MLP autoencoder: encoder MLP, then decoder MLP back to `input_shape`."""
  encoder_widths: Sequence[int]
  decoder_widths: Sequence[int]
  input_shape: Sequence[int]

  def setup(self):
    flat_dim = onp.prod(self.input_shape)
    self.encoder = _MLP(self.encoder_widths)
    # Decoder ends with a layer wide enough to reconstruct the flat input.
    self.decoder = _MLP(tuple(self.decoder_widths) + (flat_dim,))

  def __call__(self, x):
    return self.decode(self.encode(x))

  def encode(self, x):
    assert x.shape[1:] == self.input_shape
    flat = np.reshape(x, (x.shape[0], -1))
    return self.encoder(flat)

  def decode(self, z):
    logits = self.decoder(z)
    recon = nn.sigmoid(logits)
    return np.reshape(recon, (recon.shape[0],) + tuple(self.input_shape))
class _Encoder(nn.Module):
  """VAE encoder producing per-latent mean and log-variance."""
  latents: int

  @nn.compact
  def __call__(self, x):
    hidden = nn.relu(nn.Dense(32, name='fc1')(x))
    mean_x = nn.Dense(self.latents, name='fc2_mean')(hidden)
    logvar_x = nn.Dense(self.latents, name='fc2_logvar')(hidden)
    return mean_x, logvar_x
class _Decoder(nn.Module):
  """VAE decoder: a 2-layer MLP mapping latents back to 32 features."""

  @nn.compact
  def __call__(self, z):
    hidden = nn.relu(nn.Dense(16, name='fc1')(z))
    return nn.Dense(32, name='fc2')(hidden)
class _VAE(nn.Module):
  """Variational autoencoder built from `_Encoder` / `_Decoder`."""
  latents: int = 20

  def setup(self):
    self.encoder = _Encoder(self.latents)
    self.decoder = _Decoder()

  def __call__(self, x, z_rng):
    mean, logvar = self.encoder(x)
    # Sample the latent via the reparameterization trick.
    latent = _reparameterize(z_rng, mean, logvar)
    recon_x = self.decoder(latent)
    return recon_x, mean, logvar

  def generate(self, z):
    return nn.sigmoid(self.decoder(z))
_ModuleDef = Any


class _ResNetBlock(nn.Module):
  """ResNet block."""
  filters: int
  conv: _ModuleDef
  norm: _ModuleDef
  act: Callable
  strides: Tuple[int, int] = (1, 1)

  @nn.compact
  def __call__(self, x,):
    shortcut = x
    out = self.conv(self.filters, (3, 3), self.strides)(x)
    out = self.norm()(out)
    out = self.act(out)
    out = self.conv(self.filters, (3, 3))(out)
    # Zero-init the last norm's scale so the block starts as identity.
    out = self.norm(scale_init=nn.initializers.zeros)(out)
    if shortcut.shape != out.shape:
      # Project the shortcut when spatial size or channel count changes.
      shortcut = self.conv(self.filters, (1, 1),
                           self.strides, name='conv_proj')(shortcut)
      shortcut = self.norm(name='norm_proj')(shortcut)
    return self.act(shortcut + out)
class _BottleneckResNetBlock(nn.Module):
  """Bottleneck ResNet block."""
  filters: int
  conv: _ModuleDef
  norm: _ModuleDef
  act: Callable
  strides: Tuple[int, int] = (1, 1)

  @nn.compact
  def __call__(self, x):
    shortcut = x
    # 1x1 reduce -> 3x3 (strided) -> 1x1 expand (4x filters).
    out = self.conv(self.filters, (1, 1))(x)
    out = self.norm()(out)
    out = self.act(out)
    out = self.conv(self.filters, (3, 3), self.strides)(out)
    out = self.norm()(out)
    out = self.act(out)
    out = self.conv(self.filters * 4, (1, 1))(out)
    # Zero-init the last norm's scale so the block starts as identity.
    out = self.norm(scale_init=nn.initializers.zeros)(out)
    if shortcut.shape != out.shape:
      # Project the shortcut when spatial size or channel count changes.
      shortcut = self.conv(self.filters * 4, (1, 1),
                           self.strides, name='conv_proj')(shortcut)
      shortcut = self.norm(name='norm_proj')(shortcut)
    return self.act(shortcut + out)
class _ResNet(nn.Module):
  """A narrow ResNetV1."""
  stage_sizes: Sequence[int]  # number of blocks in each stage
  block_cls: _ModuleDef  # `_ResNetBlock` or `_BottleneckResNetBlock`
  num_classes: int
  num_filters: int = 4
  dtype: Any = np.float32
  act: Callable = nn.relu
  conv: _ModuleDef = nn.Conv

  @nn.compact
  def __call__(self, x, train: bool = True):
    conv = partial(self.conv, use_bias=False, dtype=self.dtype)
    norm = partial(nn.BatchNorm,
                   use_running_average=not train,
                   momentum=0.9,
                   epsilon=1e-5,
                   dtype=self.dtype)

    # Stem: strided 7x7 conv + batch norm + relu + max-pool.
    x = conv(self.num_filters, (7, 7), (2, 2),
             padding=[(3, 3), (3, 3)],
             name='conv_init')(x)
    x = norm(name='bn_init')(x)
    x = nn.relu(x)
    x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
    # Body: filter count doubles each stage; the first block of every stage
    # after the first downsamples with stride 2.
    for i, block_size in enumerate(self.stage_sizes):
      for j in range(block_size):
        strides = (2, 2) if i > 0 and j == 0 else (1, 1)
        x = self.block_cls(self.num_filters * 2 ** i,
                           strides=strides,
                           conv=conv,
                           norm=norm,
                           act=self.act)(x)
    # Head: global average pool over spatial dims + linear classifier.
    x = np.mean(x, axis=(1, 2))
    x = nn.Dense(self.num_classes, dtype=self.dtype)(x)
    x = np.asarray(x, self.dtype)
    return x
# Narrow ResNet-18-style model: four stages of one basic block each.
_ResNet18 = partial(_ResNet, stage_sizes=[1, 1, 1, 1],
                    block_cls=_ResNetBlock)
def _reparameterize(rng, mean, logvar):
std = np.exp(0.5 * logvar)
eps = random.normal(rng, logvar.shape)
return mean + eps * std
# MLP Mixer forked from https://github.com/google-research/vision_transformer.
class _MlpBlock(nn.Module):
  """Single Mixer MLP: expand to `mlp_dim`, GELU, project back."""
  mlp_dim: int

  @nn.compact
  def __call__(self, x):
    hidden = nn.gelu(nn.Dense(self.mlp_dim)(x))
    return nn.Dense(x.shape[-1])(hidden)
class _MixerBlock(nn.Module):
  """Mixer block layer."""
  tokens_mlp_dim: int
  channels_mlp_dim: int

  @nn.compact
  def __call__(self, x):
    # Token mixing: layer-norm, transpose tokens <-> channels, MLP, residual.
    tokens = nn.LayerNorm()(x)
    tokens = np.swapaxes(tokens, 1, 2)
    tokens = _MlpBlock(self.tokens_mlp_dim, name='token_mixing')(tokens)
    x = x + np.swapaxes(tokens, 1, 2)
    # Channel mixing: layer-norm, MLP over channels, residual.
    channels = nn.LayerNorm()(x)
    return x + _MlpBlock(self.channels_mlp_dim, name='channel_mixing')(channels)
class _MlpMixer(nn.Module):
  """Mixer architecture."""
  patches: Any  # expects a mapping with a 'size' entry (patch size)
  num_classes: int
  num_blocks: int
  hidden_dim: int
  tokens_mlp_dim: int
  channels_mlp_dim: int
  model_name: Optional[str] = None

  @nn.compact
  def __call__(self, inputs, *, train):
    del train  # unused: no train-time-only behavior
    # Patch embedding: non-overlapping conv stem, then flatten patches.
    x = nn.Conv(self.hidden_dim, self.patches['size'],
                strides=self.patches['size'], name='stem')(inputs)
    x = x.reshape((x.shape[0], -1, x.shape[-1]))
    for _ in range(self.num_blocks):
      x = _MixerBlock(self.tokens_mlp_dim, self.channels_mlp_dim)(x)
    x = nn.LayerNorm(name='pre_head_layer_norm')(x)
    # Global average pool over tokens.
    x = np.mean(x, axis=1)
    if self.num_classes:
      x = nn.Dense(self.num_classes, kernel_init=nn.initializers.zeros,
                   name='head')(x)
    return x
def _get_mixer_b16_config() -> Dict[str, Any]:
"""Returns a narrow Mixer-B/16 configuration."""
return dict(
model_name='Mixer-B_16',
patches={'size': (16, 16)},
hidden_dim=16,
num_blocks=2,
tokens_mlp_dim=4,
channels_mlp_dim=8,
)
@test_utils.product(
j_rules=[
True,
False
],
s_rules=[
True,
# False
],
fwd=[
True,
False,
None,
],
same_inputs=[
# True,
False
],
do_jit=[
True,
# False
],
do_remat=[
# True,
False
],
dtype=[
jax.dtypes.canonicalize_dtype(np.float64),
]
)
class FlaxOtherTest(test_utils.NeuralTangentsTestCase):
def test_mlp(self, same_inputs, do_jit, do_remat, dtype, j_rules,
             s_rules, fwd):
  """Compares all NTK implementations on a small flax MLP."""
  model = _MLP([12, 8, 4])
  k1, k2, ki = random.split(random.PRNGKey(1), 3)
  x1 = random.normal(k1, (4, 10), dtype)
  x2 = None if same_inputs else random.normal(k2, (3, 10), dtype)
  p = model.init(ki, x1)
  _compare_ntks(self, do_jit, do_remat, model.apply, p, x1, x2, j_rules,
                s_rules, fwd, vmap_axes=0)
def test_autoencoder(self, same_inputs, do_jit, do_remat, dtype, j_rules,
s_rules, fwd):
test_utils.skip_test(self)
model = _AutoEncoder(encoder_widths=[20, 10, 5],
decoder_widths=[5, 10, 20],
input_shape=(12,))
k1, k2, ki = random.split(random.PRNGKey(1), 3)
x1 = random.normal(k1, (5, 12), dtype)
x2 = None if same_inputs else random.normal(k2, (2, 12), dtype)
p = model.init(ki, x1)
# Test encoding-decoding.
_compare_ntks(self, do_jit, do_remat, model.apply, p, x1, x2, j_rules,
s_rules, fwd, vmap_axes=0)
# Test encoding.
def encode(p, x):
return model.apply(p, x, method=model.encode)
_compare_ntks(self, do_jit, do_remat, encode, p, x1, x2, j_rules,
s_rules, fwd, vmap_axes=0)
# Test decoding.
x1d = model.apply(p, x1, method=model.encode)
x2d = None if x2 is None else model.apply(p, x2, method=model.encode)
def decode(p, x):
return model.apply(p, x, method=model.decode)
_compare_ntks(self, do_jit, do_remat, decode, p, x1d, x2d, j_rules,
s_rules, fwd, vmap_axes=0)
# Test manual encoding-decoding
def encode_decode(p, x):
encoded = model.apply(p, x, method=model.encode)
decoded = model.apply(p, encoded, method=model.decode)
return decoded
# Test encoding-decoding.
_compare_ntks(self, do_jit, do_remat, encode_decode, p, x1, x2, j_rules,
s_rules, fwd, vmap_axes=0)
def test_vae(self, same_inputs, do_jit, do_remat, dtype, j_rules,
s_rules, fwd):
test_utils.skip_test(self)
model = _VAE(latents=2)
k1, k2, ki, kzi, kza = random.split(random.PRNGKey(1), 5)
x1 = random.normal(k1, (1, 1), dtype)
x2 = None if same_inputs else random.normal(k2, (1, 1), dtype)
p = model.init(ki, x1, z_rng=kzi)
_compare_ntks(self, do_jit, do_remat, partial(model.apply, z_rng=kza),
p, x1, x2, j_rules, s_rules, fwd)
def test_resnet18(self, same_inputs, do_jit, do_remat, dtype, j_rules,
s_rules, fwd):
test_utils.skip_test(self)
model = _ResNet18(num_classes=1)
k1, k2, ki = random.split(random.PRNGKey(1), 3)
x1 = random.normal(k1, (1, 128, 128, 1), dtype)
x2 = None if same_inputs else random.normal(k2, (1, 128, 128, 1), dtype)
p = model.init(ki, x1)
def apply_fn(params, x):
return model.apply(params, x, mutable=['batch_stats'])[0]
_compare_ntks(self, do_jit, do_remat, apply_fn, p, x1, x2, j_rules,
s_rules, fwd)
def test_mixer_b16(self, same_inputs, do_jit, do_remat, dtype, j_rules,
s_rules, fwd):
test_utils.skip_test(self)
model = _MlpMixer(num_classes=1, **_get_mixer_b16_config())
k1, k2, ki = random.split(random.PRNGKey(1), 3)
x1 = random.normal(k1, (1, 128, 128, 1), dtype)
x2 = None if same_inputs else random.normal(k2, (1, 128, 128, 1), dtype)
p = model.init(ki, x1, train=True)
def apply_fn(params, x):
return model.apply(params, x, mutable=['batch_stats'], train=True)[0]
_compare_ntks(self, do_jit, do_remat, apply_fn, p, x1, x2, j_rules,
s_rules, fwd)
@test_utils.product(
    j_rules=[
        True,
        False
    ],
    s_rules=[
        True,
        False
    ],
    fwd=[
        True,
        False,
        None,
    ],
    same_inputs=[
        # True,
        False
    ],
    do_jit=[
        True,
        # False
    ],
    do_remat=[
        # True,
        False
    ],
    dtype=[
        jax.dtypes.canonicalize_dtype(np.float64),
    ],
    # Feature group counts for the two convolutional layers of `_CNN`.
    feature_group_counts=[
        [1, 1],
        [1, 5],
        [5, 1],
        [5, 5]
    ],
)
class FlaxCnnTest(test_utils.NeuralTangentsTestCase):
  """Compares empirical NTK implementations on a grouped Flax CNN."""

  def test_flax_cnn(self, same_inputs, do_jit, do_remat, dtype, j_rules,
                    s_rules, fwd, feature_group_counts):
    """NTK implementation agreement on `_CNN` with feature-grouped convs."""
    test_utils.skip_test(self)
    n_chan = 5
    x1 = random.normal(random.PRNGKey(1), (2, 8, 8, n_chan), dtype)
    x2 = None if same_inputs else random.normal(random.PRNGKey(2),
                                                (3, 8, 8, n_chan),
                                                dtype)
    model = _CNN(n_chan, feature_group_counts)
    p = model.init(random.PRNGKey(0), x1)
    _compare_ntks(self, do_jit, do_remat, model.apply, p, x1, x2, j_rules,
                  s_rules, fwd, vmap_axes=0)
@test_utils.product(
    j_rules=[
        True,
        False
    ],
    s_rules=[
        True,
        False
    ],
    fwd=[
        True,
        False,
        None,
    ],
    same_inputs=[
        # True,
        False
    ],
    do_jit=[
        True,
        # False
    ],
    do_remat=[
        # True,
        False
    ],
    dtype=[
        jax.dtypes.canonicalize_dtype(np.float64),
    ],
    n_chan_in=[
        1,
        2,
        3,
        4
    ],
    batch_size=[
        1,
        2,
        3,
        4
    ],
    group_count=[
        1,
        2,
        4,
        8,
        16,
    ],
    # Whether `group_count` applies to the batch or the feature dimension.
    group_mode=[
        'batch',
        'feature'
    ],
    vmap_axes=[
        0,
        None
    ]
)
class ConvTest(test_utils.NeuralTangentsTestCase):
  """Compares empirical NTK implementations on a raw grouped convolution."""

  def test_conv(
      self,
      same_inputs,
      do_jit,
      do_remat,
      dtype,
      j_rules,
      s_rules,
      fwd,
      n_chan_in,
      batch_size,
      group_count,
      group_mode,
      vmap_axes
  ):
    """NTK agreement on `lax.conv_general_dilated` with batch/feature groups."""
    # TODO(b/235167364): unskip when the bug is fixed.
    test_utils.skip_test(self, platforms=('cpu', 'tpu',))
    n_chan_out = 16
    if group_mode == 'batch':
      batch_group_count = group_count
      feature_group_count = 1
      if vmap_axes == 0 and group_count > 1:
        raise absltest.SkipTest('Batch grouped convolution not vmap-able.')
    elif group_mode == 'feature':
      batch_group_count = 1
      feature_group_count = group_count
    else:
      raise ValueError(group_mode)
    # Scale channel / batch sizes so they are divisible by the group counts.
    n_chan_in *= feature_group_count
    batch_size *= batch_group_count
    x1 = random.normal(random.PRNGKey(1), (batch_size, n_chan_in, 5, 4), dtype)
    x2 = None if same_inputs else random.normal(random.PRNGKey(2),
                                                (batch_size, n_chan_in, 5, 4),
                                                dtype)
    p = random.normal(random.PRNGKey(2),
                      (n_chan_out, n_chan_in // feature_group_count, 3, 2))
    def f(p, x):
      return lax.conv_general_dilated(x, p, (1, 1), 'SAME',
                                      feature_group_count=feature_group_count,
                                      batch_group_count=batch_group_count)
    _compare_ntks(self, do_jit, do_remat, f, p, x1, x2, j_rules, s_rules, fwd,
                  vmap_axes=vmap_axes)
class EmpiricalNtkVpTest(test_utils.NeuralTangentsTestCase):
  """Tests NTK-vector products against an explicitly computed NTK."""

  @test_utils.product(
      same_inputs=[
          True,
          False
      ],
      do_jit=[
          True,
          False
      ],
  )
  def test_ntk_vp_fn(
      self,
      same_inputs,
      do_jit,
  ):
    """NTK assembled from NTK-vps must match the reference NTK, and vice versa."""
    N1 = 4
    N2 = N1 if same_inputs else 6
    O = 3
    init_fn, f, _ = stax.serial(
        stax.Dense(8),
        stax.Relu(),
        stax.Dense(O)
    )
    k1, k2, k3, k4 = random.split(random.PRNGKey(1), 4)
    x1 = random.normal(k1, (N1, 7))
    x2 = None if same_inputs else random.normal(k2, (N2, 7))
    _, params = init_fn(k3, x1.shape)
    ntk_ref = nt.empirical_ntk_fn(f, (), vmap_axes=0)(x1, x2, params)
    # Reorder to (N1, O, N2, O) to match the vp-assembled layout below.
    ntk_ref = np.moveaxis(ntk_ref, 1, 2)
    # Compute an NTK via NTK-vps and compare to the reference
    ntk_vp_fn = nt.empirical_ntk_vp_fn(f, x1, x2, params)
    if do_jit:
      ntk_vp_fn = jit(ntk_vp_fn)
    # Applying vps to all basis cotangents reconstructs the full NTK.
    eye = np.eye(N2 * O).reshape((N2 * O, N2, O))
    ntk_vps = jit(jax.vmap(ntk_vp_fn))(eye)
    ntk_vps = np.moveaxis(ntk_vps, (0,), (2,))
    ntk_vps = ntk_vps.reshape((N1, O, N2, O))
    self.assertAllClose(ntk_ref, ntk_vps)
    # Compute a single NTK-vp via reference NTK, and compare to the NTK-vp.
    cotangents = random.normal(k4, f(params, x1 if same_inputs else x2).shape)
    ntk_vp_ref = np.tensordot(ntk_ref, cotangents, ((2, 3), (0, 1)))
    ntk_vp = ntk_vp_fn(cotangents)
    self.assertAllClose(ntk_vp_ref, ntk_vp)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 55,785 | 32.009467 | 214 | py |
neural-tangents | neural-tangents-main/tests/monte_carlo_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/monte_carlo.py`."""
from absl.testing import absltest
import jax
from jax import random
from jax.config import config
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src import batching
from neural_tangents._src import monte_carlo
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
# Batch sizes used when splitting kernel computations into batches.
BATCH_SIZES = [
    2,
    4,
]

# Width of the finite networks used for Monte Carlo sampling.
WIDTH = 192

# Simulated device counts (0 disables the `pmap` stub; see
# `test_utils.stub_out_pmap`).
DEVICE_COUNTS = [0, 1, 2]

STORE_ON_DEVICE = [True, False]

# `get` arguments exercised against kernel functions.
ALL_GET = ('nngp', 'ntk', ('nngp', 'ntk'), None)

test_utils.update_test_tolerance()
def _get_inputs_and_model(width=1, n_classes=2, use_conv=True):
  """Draws a fixed pair of random inputs and builds a small conv/dense model."""
  key = random.PRNGKey(1)
  key, split = random.split(key)
  x1 = random.normal(key, (8, 4, 3, 2))
  x2 = random.normal(split, (4, 4, 3, 2))
  if not use_conv:
    # Collapse spatial/channel axes for the fully-connected first layer.
    x1 = np.reshape(x1, (x1.shape[0], -1))
    x2 = np.reshape(x2, (x2.shape[0], -1))
  first_layer = stax.Conv(width, (3, 3)) if use_conv else stax.Dense(width)
  init_fn, apply_fn, kernel_fn = stax.serial(
      first_layer,
      stax.Relu(),
      stax.Flatten(),
      stax.Dense(n_classes, 2., 0.5))
  return x1, x2, init_fn, apply_fn, kernel_fn, key
class MonteCarloTest(test_utils.NeuralTangentsTestCase):
  """Tests Monte Carlo kernel estimation: batching, pmap, and convergence."""

  @test_utils.product(
      batch_size=BATCH_SIZES,
      device_count=DEVICE_COUNTS,
      store_on_device=STORE_ON_DEVICE,
      get=ALL_GET,
  )
  def test_sample_once_batch(
      self,
      batch_size,
      device_count,
      store_on_device,
      get
  ):
    """A batched single sample must equal the unbatched single sample."""
    test_utils.stub_out_pmap(batching, device_count)
    x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
    kernel_fn = nt.empirical_kernel_fn(apply_fn)
    sample_once_fn = monte_carlo._sample_once_kernel_fn(kernel_fn, init_fn)
    sample_once_batch_fn = monte_carlo._sample_once_kernel_fn(
        kernel_fn, init_fn, batch_size, device_count, store_on_device)
    one_sample = sample_once_fn(x1, x2, key, get)
    one_sample_batch = sample_once_batch_fn(x1, x2, key, get)
    self.assertAllClose(one_sample, one_sample_batch)

  @test_utils.product(
      batch_size=BATCH_SIZES,
      device_count=DEVICE_COUNTS,
      store_on_device=STORE_ON_DEVICE,
      get=ALL_GET
  )
  def test_batch_sample_once(
      self,
      batch_size,
      device_count,
      store_on_device,
      get
  ):
    """Wrapping the single-sample fn in `batching.batch` must not change it."""
    test_utils.stub_out_pmap(batching, device_count)
    x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
    kernel_fn = nt.empirical_kernel_fn(apply_fn)
    sample_once_fn = monte_carlo._sample_once_kernel_fn(
        kernel_fn, init_fn, device_count=0)
    batch_sample_once_fn = batching.batch(sample_once_fn, batch_size,
                                          device_count, store_on_device)
    one_sample = sample_once_fn(x1, x2, key, get)
    one_batch_sample = batch_sample_once_fn(x1, x2, key, get)
    self.assertAllClose(one_sample, one_batch_sample)

  @test_utils.product(
      batch_size=BATCH_SIZES,
      device_count=DEVICE_COUNTS,
      store_on_device=STORE_ON_DEVICE
  )
  def test_sample_vs_analytic_nngp(
      self,
      batch_size,
      device_count,
      store_on_device
  ):
    """Monte Carlo NNGP estimate must approach the analytic NNGP."""
    test_utils.stub_out_pmap(batching, device_count)
    x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
        WIDTH, 256, jax.default_backend() == 'tpu')
    sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 200,
                                               batch_size, device_count,
                                               store_on_device)
    ker_empirical = sample(x1, x2, 'nngp')
    ker_analytic = stax_kernel_fn(x1, x2, 'nngp')
    test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)

  @test_utils.product(
      batch_size=BATCH_SIZES,
      device_count=DEVICE_COUNTS,
      store_on_device=STORE_ON_DEVICE
  )
  def test_monte_carlo_vs_analytic_ntk(
      self,
      batch_size,
      device_count,
      store_on_device
  ):
    """Monte Carlo NTK estimate must approach the analytic NTK."""
    test_utils.stub_out_pmap(batching, device_count)
    x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
        WIDTH, 2, jax.default_backend() == 'tpu')
    sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 100,
                                               batch_size, device_count,
                                               store_on_device,
                                               vmap_axes=0)
    ker_empirical = sample(x1, x2, 'ntk')
    ker_analytic = stax_kernel_fn(x1, x2, 'ntk')
    test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)

  @test_utils.product(
      batch_size=BATCH_SIZES,
      device_count=DEVICE_COUNTS,
      store_on_device=STORE_ON_DEVICE,
      get=ALL_GET
  )
  def test_monte_carlo_generator(
      self,
      batch_size,
      device_count,
      store_on_device,
      get
  ):
    """Passing a list of sample counts must yield a generator whose items
    match individually-computed estimates at the same sample counts."""
    test_utils.stub_out_pmap(batching, device_count)
    x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(8, 1)
    x3, x4, _, _, _, _ = _get_inputs_and_model(8, 1)
    log_n_max = 4
    n_samples = [2**k for k in range(log_n_max)]
    sample_generator = monte_carlo.monte_carlo_kernel_fn(
        init_fn, apply_fn, key, n_samples, batch_size, device_count,
        store_on_device, vmap_axes=0)
    if get is None:
      samples_12 = sample_generator(x1, x2)
      samples_34 = sample_generator(x3, x4)
      count = 0
      for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
        sample_fn = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key,
                                                      n, batch_size,
                                                      device_count,
                                                      store_on_device,
                                                      vmap_axes=0)
        sample_12 = sample_fn(x1, x2)
        sample_34 = sample_fn(x3, x4)
        self.assertAllClose(s_12, sample_12)
        self.assertAllClose(s_12, s_34)
        self.assertAllClose(s_12, sample_34)
        count += 1
      self.assertEqual(log_n_max, count)
      ker_analytic_12 = stax_kernel_fn(x1, x2, ('nngp', 'ntk'))
      ker_analytic_34 = stax_kernel_fn(x3, x4, ('nngp', 'ntk'))
    else:
      samples_12 = sample_generator(x1, x2, get)
      samples_34 = sample_generator(x3, x4, get)
      count = 0
      for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
        sample_fn = monte_carlo.monte_carlo_kernel_fn(
            init_fn, apply_fn, key, n, batch_size,
            device_count, store_on_device, vmap_axes=0)
        sample_12 = sample_fn(x1, x2, get)
        sample_34 = sample_fn(x3, x4, get)
        self.assertAllClose(s_12, sample_12)
        self.assertAllClose(s_12, s_34)
        self.assertAllClose(s_12, sample_34)
        count += 1
      self.assertEqual(log_n_max, count)
      ker_analytic_12 = stax_kernel_fn(x1, x2, get)
      ker_analytic_34 = stax_kernel_fn(x3, x4, get)
    # Very loose tolerances: only 2**(log_n_max - 1) samples were drawn.
    self.assertAllClose(ker_analytic_12, s_12, atol=2., rtol=2.)
    self.assertAllClose(ker_analytic_12, ker_analytic_34)

  @test_utils.product(
      same_inputs=[True, False],
      batch_size=[1, 2]
  )
  def test_parallel_in_out_mc(self, same_inputs, batch_size):
    """Batched and unbatched sampling must agree on pytree-valued inputs."""
    rng = random.PRNGKey(0)
    input_key1, input_key2, net_key = random.split(rng, 3)
    x1_1, x1_2, x1_3 = random.normal(input_key1, (3, 2, 5))
    x1 = (x1_1, (x1_2, x1_3))
    if same_inputs:
      x2 = None
    else:
      x2_1, x2_2, x2_3 = random.normal(input_key2, (3, 4, 5))
      x2 = (x2_1, (x2_2, x2_3))
    def net(N_out):
      return stax.parallel(stax.Dense(N_out),
                           stax.parallel(stax.Dense(N_out + 1),
                                         stax.Dense(N_out + 2)))
    # Check NNGP.
    init_fn, apply_fn, _ = net(WIDTH)
    nb_kernel_fn = monte_carlo.monte_carlo_kernel_fn(init_fn,
                                                     apply_fn,
                                                     net_key,
                                                     n_samples=4,
                                                     trace_axes=(-1,))
    kernel_fn = monte_carlo.monte_carlo_kernel_fn(init_fn,
                                                  apply_fn,
                                                  net_key,
                                                  n_samples=4,
                                                  batch_size=batch_size,
                                                  trace_axes=(-1,))
    self.assertAllClose(kernel_fn(x1, x2, 'nngp'), nb_kernel_fn(x1, x2, 'nngp'))
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 9,216 | 31.340351 | 80 | py |
neural-tangents | neural-tangents-main/tests/infinite_fcn_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/function_space.py`."""
from absl.testing import absltest
from jax.config import config
from examples import infinite_fcn
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class InfiniteFcnTest(test_utils.NeuralTangentsTestCase):
  """Smoke test: the `infinite_fcn` example must run end-to-end."""

  def test_infinite_fcn(self):
    infinite_fcn.main(None)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 1,002 | 26.861111 | 74 | py |
neural-tangents | neural-tangents-main/tests/elementwise_numerical_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `examples/elementwise_numerical.py`."""
from absl.testing import absltest
from jax.config import config
from examples import elementwise_numerical
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
class ElementwiseNumericalTest(test_utils.NeuralTangentsTestCase):
  """Smoke test: the `elementwise_numerical` example must run end-to-end."""

  def test_elementwise_numerical(self):
    elementwise_numerical.main(None)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 1,045 | 28.055556 | 74 | py |
neural-tangents | neural-tangents-main/tests/test_utils.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing."""
import dataclasses
import itertools
import logging
import os
from types import ModuleType
from typing import Callable, Dict, Optional, Sequence, Tuple
from absl import flags
from absl.testing import parameterized
import jax
from jax import config
from jax import dtypes as _dtypes
from jax import jit
from jax import vmap
import jax.numpy as np
import numpy as onp
# Test-only command-line flags, read through the `FLAGS` handle below.
flags.DEFINE_string(
    'nt_test_dut',
    '',
    help=
    'Describes the device under test in case special consideration is required.'
)

# Defaults to the `NT_NUM_GENERATED_CASES` environment variable, or 4.
flags.DEFINE_integer(
    'nt_num_generated_cases',
    int(os.getenv('NT_NUM_GENERATED_CASES', '4')),
    help='Number of generated cases to test'
)

FLAGS = flags.FLAGS
# Utility functions forked from :obj:`jax._src.public_test_util`.
_python_scalar_dtypes: Dict[type, onp.dtype] = {
bool: onp.dtype('bool'),
int: onp.dtype('int64'),
float: onp.dtype('float64'),
complex: onp.dtype('complex128'),
}
def _dtype(x) -> onp.dtype:
if hasattr(x, 'dtype'):
return x.dtype
elif type(x) in _python_scalar_dtypes:
return onp.dtype(_python_scalar_dtypes[type(x)])
else:
return onp.asarray(x).dtype
def _is_sequence(x) -> bool:
try:
iter(x)
except TypeError:
return False
else:
return True
def device_under_test() -> str:
  """Returns the `--nt_test_dut` flag value, or the default JAX backend."""
  return getattr(FLAGS, 'nt_test_dut', None) or jax.default_backend()
# Baseline per-dtype comparison tolerances: exact for bools/ints, loose for
# low-precision floats. Mutated in place by `update_test_tolerance`.
_DEFAULT_TOLERANCE: Dict[onp.dtype, float] = {
    onp.dtype(onp.bool_): 0,
    onp.dtype(onp.int32): 0,
    onp.dtype(onp.int64): 0,
    onp.dtype(onp.float16): 5e-3,
    onp.dtype(onp.float32): 5e-3,
    onp.dtype(onp.float64): 1e-5,
    onp.dtype(np.bfloat16): 5e-3
}
def _default_tolerance() -> Dict[onp.dtype, float]:
  """Returns per-dtype tolerances, loosened for f32/c64 when testing on TPU."""
  if device_under_test() != 'tpu':
    return _DEFAULT_TOLERANCE
  # Copy so the TPU-specific overrides do not leak into the shared dict.
  tol = dict(_DEFAULT_TOLERANCE)
  tol[onp.dtype(onp.float32)] = 5e-2
  tol[onp.dtype(onp.complex64)] = 5e-2
  return tol
def _assert_numpy_allclose(
    a: onp.ndarray,
    b: onp.ndarray,
    atol: Optional[float] = None,
    rtol: Optional[float] = None,
    err_msg: str = ''
):
  """Asserts `a` and `b` are elementwise close, handling float0 / bfloat16.

  NOTE(review): `if atol:` / `if rtol:` skip zero tolerances as well as
  `None`, so an explicit `rtol=0` silently falls back to `assert_allclose`'s
  default `rtol=1e-7`. Presumably intentional (forked from JAX) — confirm.
  """
  if a.dtype == b.dtype == _dtypes.float0:
    # float0 values carry no numeric content; require exact equality.
    onp.testing.assert_array_equal(a, b, err_msg=err_msg)
    return
  # `assert_allclose` does not understand bfloat16; compare in float32.
  a = a.astype(onp.float32) if a.dtype == _dtypes.bfloat16 else a
  b = b.astype(onp.float32) if b.dtype == _dtypes.bfloat16 else b
  kw = {}
  if atol: kw['atol'] = atol
  if rtol: kw['rtol'] = rtol
  with onp.errstate(invalid='ignore'):
    # TODO(phawkins): surprisingly, assert_allclose sometimes reports invalid
    # value errors. It should not do that.
    onp.testing.assert_allclose(a, b, **kw, err_msg=err_msg)
def _tolerance(dtype: onp.dtype, tol: Optional[float] = None) -> float:
  """Resolves the comparison tolerance for `dtype`.

  Despite the annotation, `tol` may also be a dtype -> tolerance mapping;
  scalars are returned as-is, and missing dtypes fall back to
  `_default_tolerance()`.
  """
  tol = {} if tol is None else tol
  if not isinstance(tol, dict):
    return tol
  tol = {onp.dtype(key): value for key, value in tol.items()}
  dtype = _dtypes.canonicalize_dtype(onp.dtype(dtype))
  return tol.get(dtype, _default_tolerance()[dtype])
# Memoized random permutations, keyed by list length.
_CACHED_INDICES: Dict[int, Sequence[int]] = {}


def _cases_from_list(xs):
  """Deterministically subsamples at most `nt_num_generated_cases` cases."""
  cases = list(xs)
  n = len(cases)
  if n < FLAGS.nt_num_generated_cases:
    return cases
  k = min(n, FLAGS.nt_num_generated_cases)
  # Random sampling for every parameterized test is expensive, so a single
  # fixed-seed permutation per list length is computed and cached.
  try:
    indices = _CACHED_INDICES[n]
  except KeyError:
    _CACHED_INDICES[n] = indices = onp.random.RandomState(42).permutation(n)
  return [cases[i] for i in indices[:k]]
def product(*kwargs_seqs, **testgrid):
  """Test case decorator to randomly subset a cartesian product of parameters.

  Forked from `absltest.parameterized.product`.

  Args:
    *kwargs_seqs: Each positional parameter is a sequence of keyword arg dicts;
      every test case generated will include exactly one kwargs dict from each
      positional parameter; these will then be merged to form an overall list
      of arguments for the test case.

    **testgrid: A mapping of parameter names and their possible values. Possible
      values should given as either a list or a tuple.

  Raises:
    NoTestsError: Raised when the decorator generates no tests.

  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  # Validate the grid values before any combinations are generated.
  for name, values in testgrid.items():
    assert isinstance(values, (list, tuple)), (
        'Values of {} must be given as list or tuple, found {}'.format(
            name, type(values)))

  # Validate the positional kwargs-dict sequences and check that argument
  # names do not collide across sequences or with `testgrid`.
  prior_arg_names = set()
  for kwargs_seq in kwargs_seqs:
    assert ((isinstance(kwargs_seq, (list, tuple))) and
            all(isinstance(kwargs, dict) for kwargs in kwargs_seq)), (
                'Positional parameters must be a sequence of keyword arg'
                'dicts, found {}'
                .format(kwargs_seq))
    if kwargs_seq:
      arg_names = set(kwargs_seq[0])
      assert all(set(kwargs) == arg_names for kwargs in kwargs_seq), (
          'Keyword argument dicts within a single parameter must all have the '
          'same keys, found {}'.format(kwargs_seq))
      assert not (arg_names & prior_arg_names), (
          'Keyword argument dict sequences must all have distinct argument '
          'names, found duplicate(s) {}'
          .format(sorted(arg_names & prior_arg_names)))
      prior_arg_names |= arg_names

  assert not (prior_arg_names & set(testgrid)), (
      'Arguments supplied in kwargs dicts in positional parameters must not '
      'overlap with arguments supplied as named parameters; found duplicate '
      'argument(s) {}'.format(sorted(prior_arg_names & set(testgrid))))

  # Convert testgrid into a sequence of sequences of kwargs dicts and combine
  # with the positional parameters.
  # So foo=[1,2], bar=[3,4] --> [[{foo: 1}, {foo: 2}], [{bar: 3, bar: 4}]]
  testgrid = (tuple({k: v} for v in vs) for k, vs in testgrid.items())
  testgrid = tuple(kwargs_seqs) + tuple(testgrid)

  # Create all possible combinations of parameters as a cartesian product
  # of parameter values.
  testcases = [
      dict(itertools.chain.from_iterable(case.items()
                                         for case in cases))
      for cases in itertools.product(*testgrid)
  ]
  # The random subsetting itself happens inside `parameters`.
  return parameters(testcases)
def parameters(testcases):
  """A decorator for parameterized tests randomly sampled from the list.

  Adapted from `absltest.parameterized.parameters`.

  Args:
    testcases: an iterable of test cases.

  Raises:
    NoTestsError: Raised when the decorator generates no tests.

  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  return parameterized.parameters(_cases_from_list(testcases))
class NeuralTangentsTestCase(parameterized.TestCase):
  """Testing helper class forked from JaxTestCase."""

  def _assertAllClose(
      self,
      x,
      y,
      *,
      check_dtypes: bool = True,
      atol: Optional[float] = None,
      rtol: Optional[float] = None,
      canonicalize_dtypes: bool = True,
      err_msg: str = ''
  ):
    """Assert that x and y, either arrays or nested tuples/lists, are close."""
    if isinstance(x, dict):
      # Compare dicts key-by-key after checking the key sets match.
      self.assertIsInstance(y, dict)
      self.assertEqual(set(x.keys()), set(y.keys()))
      for k in x.keys():
        self._assertAllClose(x[k], y[k], check_dtypes=check_dtypes, atol=atol,
                             rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
                             err_msg=err_msg)
    elif _is_sequence(x) and not hasattr(x, '__array__'):
      # Non-array sequences are compared elementwise, recursively.
      self.assertTrue(_is_sequence(y) and not hasattr(y, '__array__'))
      self.assertEqual(len(x), len(y))
      for x_elt, y_elt in zip(x, y):
        self._assertAllClose(x_elt, y_elt, check_dtypes=check_dtypes, atol=atol,
                             rtol=rtol, canonicalize_dtypes=canonicalize_dtypes,
                             err_msg=err_msg)
    elif hasattr(x, '__array__') or onp.isscalar(x):
      self.assertTrue(hasattr(y, '__array__') or onp.isscalar(y))
      if check_dtypes:
        self.assertDtypesMatch(x, y, canonicalize_dtypes=canonicalize_dtypes)
      x = onp.asarray(x)
      y = onp.asarray(y)
      self.assertArraysAllClose(x, y, check_dtypes=False, atol=atol, rtol=rtol,
                                err_msg=err_msg)
    elif x == y:
      return
    else:
      raise TypeError((type(x), type(y)))

  def assertArraysAllClose(
      self,
      x,
      y,
      *,
      check_dtypes: bool = True,
      atol: Optional[float] = None,
      rtol: Optional[float] = None,
      err_msg: str = ''
  ):
    """Assert that x and y are close (up to numerical tolerances)."""
    self.assertEqual(x.shape, y.shape)
    # Use the looser of the two operands' per-dtype tolerances.
    atol = max(_tolerance(_dtype(x), atol), _tolerance(_dtype(y), atol))
    rtol = max(_tolerance(_dtype(x), rtol), _tolerance(_dtype(y), rtol))
    _assert_numpy_allclose(x, y, atol=atol, rtol=rtol, err_msg=err_msg)
    if check_dtypes:
      self.assertDtypesMatch(x, y)

  def assertDtypesMatch(self, x, y, *, canonicalize_dtypes: bool = True):
    """Asserts dtypes of `x` and `y` agree (canonicalized unless x64 mode)."""
    if not config.x64_enabled and canonicalize_dtypes:
      self.assertEqual(_dtypes.canonicalize_dtype(_dtype(x)),
                       _dtypes.canonicalize_dtype(_dtype(y)))
    else:
      self.assertEqual(_dtype(x), _dtype(y))

  def assertAllClose(
      self,
      x,
      y,
      *,
      check_dtypes: bool = True,
      atol: Optional[float] = None,
      rtol: Optional[float] = None,
      canonicalize_dtypes: bool = True,
      check_finite: bool = True,
      err_msg=''):
    """Leafwise closeness assertion; also handles dataclasses and finiteness."""
    if check_finite:
      def is_finite(x):
        self.assertTrue(np.all(np.isfinite(x)))
      jax.tree_map(is_finite, x)
      jax.tree_map(is_finite, y)

    def assert_close(x, y):
      self._assertAllClose(
          x, y,
          check_dtypes=check_dtypes,
          atol=atol,
          rtol=rtol,
          canonicalize_dtypes=canonicalize_dtypes,
          err_msg=err_msg,
      )

    if dataclasses.is_dataclass(x):
      self.assertIs(type(y), type(x))
      for field in dataclasses.fields(x):
        key = field.name
        x_value, y_value = getattr(x, key), getattr(y, key)
        is_pytree_node = field.metadata.get('pytree_node', True)
        if is_pytree_node:
          assert_close(x_value, y_value)
        else:
          # Non-pytree (static) fields must be exactly equal.
          self.assertEqual(x_value, y_value, key)
    else:
      assert_close(x, y)
# Neural Tangents specific utilities.
def _jit_vmap(f: Callable) -> Callable:
return jit(vmap(f))
def update_test_tolerance(f32_tol: float = 5e-3, f64_tol: float = 1e-5):
  """Overrides the default float32/float64 comparison tolerances in place."""
  for base_dtype, tol in ((onp.float32, f32_tol), (onp.float64, f64_tol)):
    _DEFAULT_TOLERANCE[onp.dtype(base_dtype)] = tol
def stub_out_pmap(batch: ModuleType, count: int):
  """On CPU/GPU, replaces `batch.pmap` with `jit(vmap)` and fakes `count`
  devices, to simulate multi-core execution."""
  if count <= 0:
    return

  class xla_bridge_stub:

    def device_count(self) -> int:
      return count

  if jax.default_backend() in ('gpu', 'cpu'):
    batch.pmap = _jit_vmap
    batch.xla_bridge = xla_bridge_stub()
def _log(
    relative_error: float,
    absolute_error: float,
    expected,
    actual,
    did_pass: bool
):
  """Logs a PASSED/FAILED banner with both error measures and both matrices."""
  msg = 'PASSED' if did_pass else 'FAILED'
  logging.info(f'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n'
               f'\n{msg} with {relative_error} relative error \n'
               f'and {absolute_error} absolute error: \n'
               f'---------------------------------------------\n'
               f'EXPECTED: \n'
               f'{expected}\n'
               f'---------------------------------------------\n'
               f'ACTUAL: \n'
               f'{actual}\n'
               f'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n'
               )
def assert_close_matrices(self, expected, actual, rtol, atol=0.1):
  """Asserts `actual` is close to `expected`, leafwise over pytrees.

  Closeness is relative Frobenius-norm error <= `rtol` AND mean absolute
  error <= `atol`. `self` is the test case (provides `fail`/`assertEqual`);
  both outcomes are logged via `_log`.
  """
  def assert_close(expected, actual):
    self.assertEqual(expected.shape, actual.shape)
    # Guard against division by zero when `expected` is (near-)zero.
    relative_error = (
        np.linalg.norm(actual - expected) /
        np.maximum(np.linalg.norm(expected), 1e-12))
    absolute_error = np.mean(np.abs(actual - expected))
    if (np.isnan(relative_error) or
        relative_error > rtol or
        absolute_error > atol):
      _log(relative_error, absolute_error, expected, actual, False)
      self.fail(self.failureException('Relative ERROR: ',
                                      float(relative_error),
                                      'EXPECTED:' + ' ' * 50,
                                      expected,
                                      'ACTUAL:' + ' ' * 50,
                                      actual,
                                      ' ' * 50,
                                      'Absolute ERROR: ',
                                      float(absolute_error)))
    else:
      _log(relative_error, absolute_error, expected, actual, True)
  jax.tree_map(assert_close, expected, actual)
def skip_test(
    self,
    msg: str = 'Skipping large tests for speed.',
    platforms: Tuple[str, ...] = ('cpu',)
):
  """Skips the calling test when running on any backend in `platforms`.

  Args:
    self: the test case instance (this helper is called as a free function).
    msg: skip reason reported by the test runner.
    platforms: JAX backend names on which to skip.
  """
  if jax.default_backend() in platforms:
    # `skipTest` raises `unittest.SkipTest` itself and returns `None`, so
    # `raise`-ing its return value (as the original code did) could never
    # execute — and would be a `TypeError` if `skipTest` ever returned.
    parameterized.TestCase.skipTest(self, msg)
def mask(
    x: np.ndarray,
    mask_constant: Optional[float],
    mask_axis: Sequence[int],
    key: jax.random.KeyArray,
    p: float) -> np.ndarray:
  """Randomly overwrites entries of `x` with `mask_constant`.

  A Bernoulli(`p`) mask is drawn with size 1 along every axis in `mask_axis`,
  so masking is broadcast along those axes. If `mask_constant` is `None` no
  masking is applied. Note the output is sorted along axis 1 in ALL cases —
  presumably to make masked entries contiguous; confirm against callers.
  """
  if mask_constant is not None:
    mask_shape = [1 if i in mask_axis else s
                  for i, s in enumerate(x.shape)]
    mask_mat = jax.random.bernoulli(key, p=p, shape=mask_shape)
    x = np.where(mask_mat, mask_constant, x)
  x = np.sort(x, 1)
  return x
| 13,913 | 30.622727 | 80 | py |
neural-tangents | neural-tangents-main/tests/stax/elementwise_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/stax/elementwise.py`."""
import itertools
import random as prandom
from absl.testing import absltest
from jax import default_backend
from jax import grad, jacfwd, jacrev, jit, jvp, value_and_grad
from jax import random
from jax.config import config
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
test_utils.update_test_tolerance()
prandom.seed(1)
class ActivationTest(test_utils.NeuralTangentsTestCase):
  def _test_activation_fc(self, phi, get):
    """Compares the analytic kernel of `Dense -> phi -> Dense` against a
    Monte Carlo estimate on small random inputs."""
    key1, key2, key_mc = random.split(random.PRNGKey(1), 3)
    # `cos` keeps the inputs bounded in [-1, 1].
    x1 = np.cos(random.normal(key1, (3, 2)))
    x2 = np.cos(random.normal(key2, (2, 2)))
    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(1024),
        phi,
        stax.Dense(1 if get == 'ntk' else 1024)
    )
    analytic_kernel = kernel_fn(x1, x2, get, diagonal_spatial=True)
    mc_kernel_fn = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=apply_fn,
        key=key_mc,
        n_samples=800,
        implementation=2,
        vmap_axes=0,
        device_count=0,
    )
    if get == 'cov1':
      # `cov1` is compared against the diagonal of the empirical NNGP on `x1`.
      empirical_kernel = np.diag(mc_kernel_fn(x1, None, 'nngp'))
    else:
      empirical_kernel = mc_kernel_fn(x1, x2, get)
    self.assertAllClose(analytic_kernel, empirical_kernel, atol=0.01, rtol=0.05)
  @test_utils.product(
      phi=[
          stax.Gabor,
          stax.Sigmoid_like
      ],
      get=['cov1', 'nngp', 'ntk'],
  )
  def test_nonparametric(
      self,
      phi,
      get,
  ):
    """Tests activations constructed with no arguments."""
    self._test_activation_fc(phi(), get)
  @test_utils.product(
      phi=[stax.Monomial, stax.RectifiedMonomial],
      get=['cov1', 'nngp', 'ntk'],
      degree=[0, 1, 2, 3, 4, 5],
  )
  def test_monomial(
      self,
      phi,
      get,
      degree
  ):
    """Tests (rectified) monomial activations of degrees 0 through 5."""
    if phi == stax.RectifiedMonomial and default_backend() == 'tpu':
      raise absltest.SkipTest('`NaN` issues in Rectified Monomials on TPU.')
    self._test_activation_fc(phi(degree=degree), get)
  @test_utils.product(
      phi=[stax.Polynomial],
      get=['cov1', 'nngp', 'ntk'],
      # Coefficient vectors of assorted lengths and signs, including the
      # empty and all-zero polynomials.
      coef=[
          [],
          [0],
          [-2],
          [0, 0],
          [0, 1],
          [1, 0],
          [1, -1],
          [-0.5, 1.2],
          [1.3, 0, -1.2, -0.5],
          [-0.1, 2.1, 0, 0, -1.2, -0.5, 0, 0]
      ],
  )
  def test_polynomial(
      self,
      phi,
      get,
      coef
  ):
    """Tests polynomial activations over assorted coefficient vectors."""
    self._test_activation_fc(phi(coef=coef), get)
  @stax.layer
  def _RBF(self, gamma):
    """Reference RBF kernel layer; only `kernel_fn` is implemented."""
    init_fn = lambda key, input_shape: (input_shape, ())
    def apply_fn(unused_params, unused_xs, **kwargs):
      # This layer is used purely as an analytic kernel reference.
      raise NotImplementedError()
    def kernel_fn(kernels, **kwargs):
      if kernels.ntk is not None:
        raise ValueError('RBF Kernel does not have an associated NTK.')
      if kernels.nngp.ndim > 2:
        raise ValueError(
            ('RBF Kernel is not defined for covariance matrices with dimension'
             ' greater than two.'))
      input_dim = kernels.shape1[1]
      cov1 = kernels.cov1
      cov1 = np.reshape(cov1, (cov1.shape[0], 1))
      cov2 = cov1 if kernels.cov2 is None else kernels.cov2
      cov2 = np.reshape(cov2, (1, cov2.shape[0]))
      nngp = kernels.nngp
      # TODO(schsam): Update cov1 and cov2 if we want to compose this kernel
      # with other kernels.
      return kernels.replace(
          nngp=np.exp(-input_dim * gamma * (cov1 + cov2 - 2 * nngp)))
    return init_fn, apply_fn, kernel_fn
  def _test_activation(
      self,
      activation_fn,
      same_inputs,
      model,
      get,
      rbf_gamma=None
  ):
    """Compares the analytic kernel of `activation_fn` to a MC estimate.

    Args:
      activation_fn: `(init_fn, apply_fn, kernel_fn)` layer triple.
      same_inputs: if `True`, uses `x2 = None` (symmetric kernel).
      model: `'fc'`, `'conv-pool'` or `'conv-flatten'`.
      get: kernel to compare, e.g. `'nngp'` or `'ntk'`.
      rbf_gamma: if given (and `get == 'nngp'`, `model == 'fc'`), additionally
        cross-checks against the explicit `_RBF` kernel.
    """
    if 'conv' in model:
      # NOTE(review): presumably restricts conv tests to certain backends —
      # confirm against `test_utils.skip_test`.
      test_utils.skip_test(self)

    key = random.PRNGKey(1)
    key, split = random.split(key)
    output_dim = 1024 if get == 'nngp' else 1
    b_std = 0.5
    W_std = 2.0
    # Per-activation weight/bias scales chosen to keep MC variance manageable.
    if activation_fn[2].__name__ == 'Sin':
      W_std = 0.9
    if activation_fn[2].__name__ == 'Rbf':
      W_std = 1.0
      b_std = 0.0

    if model == 'fc':
      rtol = 0.04
      X0_1 = random.normal(key, (4, 2))
      X0_2 = None if same_inputs else random.normal(split, (2, 2))
      affine = stax.Dense(1024, W_std, b_std)
      readout = stax.Dense(output_dim)
      depth = 1
    else:
      rtol = 0.05
      X0_1 = random.normal(key, (2, 4, 4, 3))
      X0_2 = None if same_inputs else random.normal(split, (4, 4, 4, 3))
      affine = stax.Conv(512, (3, 2), W_std=W_std, b_std=b_std, padding='SAME')
      readout = stax.serial(stax.GlobalAvgPool() if 'pool' in model else
                            stax.Flatten(),
                            stax.Dense(output_dim))
      depth = 2

    # Fewer samples (and looser tolerance) on CPU to keep runtime reasonable.
    if default_backend() == 'cpu':
      num_samplings = 200
      rtol *= 2
    else:
      num_samplings = (500 if activation_fn[2].__name__ in ('Sin', 'Rbf')
                       else 300)

    init_fn, apply_fn, kernel_fn = stax.serial(
        *[affine, activation_fn]*depth, readout)
    analytic_kernel = kernel_fn(X0_1, X0_2, get)
    mc_kernel_fn = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, split, num_samplings,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    empirical_kernel = mc_kernel_fn(X0_1, X0_2, get)
    test_utils.assert_close_matrices(self, analytic_kernel,
                                     empirical_kernel, rtol)

    # Check match with explicit RBF
    if rbf_gamma is not None and get == 'nngp' and model == 'fc':
      input_dim = X0_1.shape[1]
      _, _, kernel_fn = self._RBF(rbf_gamma / input_dim)
      direct_rbf_kernel = kernel_fn(X0_1, X0_2, get)
      test_utils.assert_close_matrices(self, analytic_kernel,
                                       direct_rbf_kernel, rtol)
@test_utils.product(
model=[
'fc',
'conv-pool',
'conv-flatten'
],
phi_name=[
'Sin',
'Cos',
'Erf',
'Gelu',
'Sign',
],
same_inputs=[False],
get=['nngp', 'ntk'],
approximate=[True, False],
abc=list(itertools.product(
[2., 0.3],
[1.5, 0.3],
[0., -np.pi/4., np.pi/2.]
))
)
def test_activation(
self,
same_inputs,
model,
phi_name,
get,
abc,
approximate
):
if abc != [0.3, 1.5, -np.pi/4]:
test_utils.skip_test(self)
if approximate and phi_name != 'Gelu':
raise absltest.SkipTest(
f'{phi_name} does not have an `approximate parameter.')
a, b, c = abc
if phi_name == 'Sin':
activation = stax.Sin(a=a, b=b, c=c)
elif phi_name == 'Erf':
activation = stax.Erf(a=a, b=b, c=c)
elif phi_name in ['Gelu', 'Sign', 'Cos']:
if a != 0.3 or b != 0.3 or c != 0.:
raise absltest.SkipTest('Skip `Gelu/Sign/Cos` test if '
' (a, b, c) != (.3, .3, 0.).')
activation = stax.Gelu() if phi_name == 'Gelu' else stax.Sign()
else:
raise NotImplementedError(f'Activation {phi_name} is not implemented.')
self._test_activation(activation, same_inputs, model, get)
@test_utils.product(
model=[
'fc',
'conv-pool',
'conv-flatten'
],
same_inputs=[False, True],
get=['nngp', 'ntk'],
gamma=[1e-6, 1e-4, 1e-2, 1.0, 2.]
)
def test_rbf(self, same_inputs, model, get, gamma):
activation = stax.Rbf(gamma)
self._test_activation(activation, same_inputs, model, get,
rbf_gamma=gamma)
@test_utils.product(
a=[-0.5, 0.25],
b=[-0.5, -0.1, 0.1],
phi=[stax.Gaussian, stax.Exp],
same_inputs=[False, True, None],
n=[0]
)
def test_nonlineariy(self, phi, same_inputs, a, b, n):
width = 2**10
n_samples = 2**9
init_fn, apply_fn, kernel_fn = stax.serial(
stax.Dense(width),
phi(a=a, b=b),
stax.Dense(width),
phi(a=a, b=b),
stax.Dense(1))
key1, key2, key_mc = random.split(random.PRNGKey(1), 3)
shape = (4, 3, 2)[:n] + (1,)
x1 = np.cos(random.normal(key1, (2,) + shape))
if same_inputs is None:
x2 = None
elif same_inputs is True:
x2 = x1
else:
x2 = np.cos(random.normal(key2, (3,) + shape))
k = kernel_fn(x1, x2)
mc_kernel_fn = nt.monte_carlo_kernel_fn(init_fn, apply_fn, key_mc,
n_samples)
k_mc = mc_kernel_fn(x1, x2, ('nngp', 'ntk'))
test_utils.assert_close_matrices(self, k_mc.nngp, k.nngp, 6e-2)
test_utils.assert_close_matrices(self, k_mc.ntk, k.ntk, 6e-2)
def test_exp_normalized(self):
key = random.PRNGKey(0)
x1 = random.normal(key, (2, 6, 7, 1))
x2 = random.normal(key, (4, 6, 7, 1))
for do_clip in [True, False]:
for gamma in [1., 2., 0.5]:
for get in ['nngp', 'ntk']:
with self.subTest(do_clip=do_clip, gamma=gamma, get=get):
_, _, kernel_fn = stax.serial(
stax.Conv(1, (3, 3)),
stax.ExpNormalized(gamma, do_clip),
stax.Conv(1, (3, 3)),
stax.ExpNormalized(gamma, do_clip),
stax.GlobalAvgPool(),
stax.Dense(1)
)
k_12 = kernel_fn(x1, x2, get=get)
self.assertEqual(k_12.shape, (x1.shape[0], x2.shape[0]))
k_11 = kernel_fn(x1, None, get=get)
self.assertEqual(k_11.shape, (x1.shape[0],) * 2)
self.assertGreater(np.min(np.linalg.eigvalsh(k_11)), 0)
k_22 = kernel_fn(x2, None, get=get)
self.assertEqual(k_22.shape, (x2.shape[0],) * 2)
self.assertGreater(np.min(np.linalg.eigvalsh(k_22)), 0)
def test_exp_normalized_ntk(self):
def nngp_fn(cov12, var1, var2):
prod = np.sqrt(var1 * var2)
return prod * np.exp(cov12 / prod - 1)
_, _, kernel_fn = stax.serial(stax.Dense(1),
stax.Elementwise(nngp_fn=nngp_fn))
_, _, kernel_fn_manual = stax.serial(stax.Dense(1),
stax.ExpNormalized())
key = random.PRNGKey(1)
x1 = random.normal(key, (5, 4, 3, 1))
x2 = random.normal(key, (6, 4, 3, 1))
k = kernel_fn(x1, x2)
k_manual = kernel_fn_manual(x1, x2)
self.assertAllClose(k_manual, k)
@test_utils.product(
same_inputs=[False, True],
degree=[1, 2, 3, 4, 5, 6],
get=['ntk', 'nngp'],
readout=['pool', 'flatten']
)
def test_hermite(self, same_inputs, degree, get, readout):
key = random.PRNGKey(1)
key1, key2, key = random.split(key, 3)
if degree > 2:
width = 10000
n_samples = 5000
test_utils.skip_test(self)
else:
width = 10000
n_samples = 100
x1 = np.cos(random.normal(key1, [2, 6, 6, 3]))
x2 = x1 if same_inputs else np.cos(random.normal(key2, [3, 6, 6, 3]))
conv_layers = [
stax.Conv(width, (3, 3), W_std=2., b_std=0.5),
stax.LayerNorm(),
stax.Hermite(degree),
stax.GlobalAvgPool() if readout == 'pool' else stax.Flatten(),
stax.Dense(1) if get == 'ntk' else stax.Identity()]
init_fn, apply_fn, kernel_fn = stax.serial(*conv_layers)
analytic_kernel = kernel_fn(x1, x2, get)
mc_kernel_fn = nt.monte_carlo_kernel_fn(init_fn, apply_fn, key, n_samples)
mc_kernel = mc_kernel_fn(x1, x2, get)
rot = degree / 2. * 1e-2
test_utils.assert_close_matrices(self, mc_kernel, analytic_kernel, rot)
class ElementwiseTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.Elementwise` with user-supplied closed-form `nngp_fn`s."""

  @test_utils.product(
      phi=[
          stax.Identity(),
          stax.Erf(),
          stax.Sin(),
          stax.Relu(),
      ],
      same_inputs=[False, True, None],
      n=[0, 1, 2],
      diagonal_batch=[True, False],
      diagonal_spatial=[True, False]
  )
  def test_elementwise(
      self,
      same_inputs,
      phi,
      n,
      diagonal_batch,
      diagonal_spatial
  ):
    """`Elementwise(fn, nngp_fn)` must match the built-in layer `phi`."""
    # `phi` is an `(init_fn, apply_fn, kernel_fn)` triple; `phi[1]` is the
    # finite-width forward pass with empty parameters.
    fn = lambda x: phi[1]((), x)
    name = phi[0].__name__

    def nngp_fn(cov12, var1, var2):
      # Hand-written closed-form NNGP maps for each activation, mirroring the
      # formulas implemented inside `stax`.
      if 'Identity' in name:
        res = cov12
      elif 'Erf' in name:
        # Arcsine kernel of the error function.
        prod = (1 + 2 * var1) * (1 + 2 * var2)
        res = np.arcsin(2 * cov12 / np.sqrt(prod)) * 2 / np.pi
      elif 'Sin' in name:
        # E[sin(u) sin(v)] for jointly Gaussian (u, v).
        sum_ = (var1 + var2)
        s1 = np.exp((-0.5 * sum_ + cov12))
        s2 = np.exp((-0.5 * sum_ - cov12))
        res = (s1 - s2) / 2
      elif 'Relu' in name:
        # Arc-cosine kernel of degree 1; the floor 1e-30 guards the sqrt
        # against tiny negative values from rounding.
        prod = var1 * var2
        sqrt = np.sqrt(np.maximum(prod - cov12 ** 2, 1e-30))
        angles = np.arctan2(sqrt, cov12)
        dot_sigma = (1 - angles / np.pi) / 2
        res = sqrt / (2 * np.pi) + dot_sigma * cov12
      else:
        raise NotImplementedError(name)
      return res

    _, _, kernel_fn = stax.serial(stax.Dense(1), stax.Elementwise(fn, nngp_fn),
                                  stax.Dense(1), stax.Elementwise(fn, nngp_fn))
    _, _, kernel_fn_manual = stax.serial(stax.Dense(1), phi,
                                         stax.Dense(1), phi)

    key = random.PRNGKey(1)
    # `n` spatial dimensions plus a trailing channel dimension.
    shape = (4, 3, 2)[:n] + (1,)
    x1 = random.normal(key, (5,) + shape)
    if same_inputs is None:
      x2 = None
    elif same_inputs is True:
      x2 = x1
    else:
      x2 = random.normal(key, (6,) + shape)

    kwargs = dict(diagonal_batch=diagonal_batch,
                  diagonal_spatial=diagonal_spatial)

    k = kernel_fn(x1, x2, **kwargs)
    # `Elementwise` marks outputs non-Gaussian, so align the flag before
    # comparing the two kernels field-by-field.
    k_manual = kernel_fn_manual(x1, x2, **kwargs).replace(is_gaussian=False)
    self.assertAllClose(k_manual, k)
class ElementwiseNumericalTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.ElementwiseNumerical` against closed-form activation kernels."""

  @test_utils.product(
      model=[
          'fc',
          'conv-pool',
          'conv-flatten'
      ],
      phi=[
          stax.Erf(),
          stax.Gelu(),
          stax.Sin(),
      ],
      same_inputs=[False, True],
      get=['nngp', 'ntk']
  )
  def test_elementwise_numerical(self, same_inputs, model, phi, get):
    """Numerical-quadrature kernel must match the analytic kernel of `phi`."""
    if 'conv' in model:
      test_utils.skip_test(self)

    key, split = random.split(random.PRNGKey(1))

    output_dim = 1
    b_std = 0.01
    W_std = 1.0
    rtol = 2e-3
    # Quadrature degree for `ElementwiseNumerical` (presumably Gauss-Hermite
    # — confirm against the stax implementation).
    deg = 25
    if get == 'ntk':
      rtol *= 2
    if default_backend() == 'tpu':
      rtol *= 2

    if model == 'fc':
      X0_1 = random.normal(key, (3, 7))
      X0_2 = None if same_inputs else random.normal(split, (5, 7))
      affine = stax.Dense(1024, W_std, b_std)
      readout = stax.Dense(output_dim)
      depth = 1
    else:
      X0_1 = random.normal(key, (2, 8, 8, 3))
      X0_2 = None if same_inputs else random.normal(split, (3, 8, 8, 3))
      affine = stax.Conv(1024, (3, 2), W_std=W_std, b_std=b_std, padding='SAME')
      readout = stax.serial(stax.GlobalAvgPool() if 'pool' in model else
                            stax.Flatten(),
                            stax.Dense(output_dim))
      depth = 2

    # Reference: analytic kernel with the built-in activation.
    _, _, kernel_fn = stax.serial(*[affine, phi] * depth, readout)
    analytic_kernel = kernel_fn(X0_1, X0_2, get)

    # Same network, but the activation kernel is integrated numerically from
    # the finite-width forward pass `phi[1]`.
    fn = lambda x: phi[1]((), x)
    _, _, kernel_fn = stax.serial(
        *[affine, stax.ElementwiseNumerical(fn, deg=deg)] * depth, readout)
    numerical_activation_kernel = kernel_fn(X0_1, X0_2, get)
    test_utils.assert_close_matrices(self, analytic_kernel,
                                     numerical_activation_kernel, rtol)
@test_utils.product(
    same_inputs=[True, False],
    do_stabilize=[True, False],
)
class ABReluTest(test_utils.NeuralTangentsTestCase):
  """Checks `stax.ABRelu` identities: ReLU, identity, leaky-ReLU, and abs."""

  def test_ab_relu_relu(self, same_inputs, do_stabilize):
    """`ABRelu(0, 1)` (and sign-flipped variants) must agree with `Relu`."""
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (3, 2))
    fc = stax.Dense(5, 1, 0)

    # Test that ABRelu(0, 1) == ReLU
    init_fn, apply_relu, kernel_fn_relu = stax.serial(fc, stax.Relu())
    _, params = init_fn(key, input_shape=X0_1.shape)
    X0_2 = None if same_inputs else random.normal(key, (4, 2))

    for a, b in [(0, 1), (0, -1), (-1, 0), (1, 0)]:
      with self.subTest(a=a, b=b):
        _, apply_ab_relu, kernel_fn_ab_relu = stax.serial(
            fc, stax.ABRelu(a, b, do_stabilize=do_stabilize))

        # ab_relu(x) = (b - a) * relu(±x): flip the input when the negative
        # slope is the nonzero one.
        X1_1_relu = (b - a) * apply_relu(params, X0_1 * (-1 if a != 0 else 1))
        X1_1_ab_relu = apply_ab_relu(params, X0_1)
        self.assertAllClose(X1_1_relu, X1_1_ab_relu)

        kernels_relu = kernel_fn_relu(X0_1, X0_2)
        kernels_ab_relu = kernel_fn_ab_relu(X0_1, X0_2)
        self.assertAllClose(kernels_relu, kernels_ab_relu)

  def test_ab_relu_id(self, same_inputs, do_stabilize):
    """`ABRelu(a, a)` must behave like `a * Identity`."""
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (3, 2))
    fc = stax.Dense(5, 1, 0)
    X0_2 = None if same_inputs else random.normal(key, (4, 2))

    # Test that ABRelu(a, a) == a * Identity
    init_fn, apply_id, kernel_fn_id = stax.serial(fc, stax.Identity())
    _, params = init_fn(key, input_shape=X0_1.shape)

    for a in [-5, -1, -0.5, 0, 0.5, 1, 5]:
      with self.subTest(a=a):
        _, apply_ab_relu, kernel_fn_ab_relu = stax.serial(
            fc, stax.ABRelu(a, a, do_stabilize=do_stabilize))

        X1_1_id = a * apply_id(params, X0_1)
        X1_1_ab_relu = apply_ab_relu(params, X0_1)
        self.assertAllClose(X1_1_id, X1_1_ab_relu)

        # Scaling the inputs by `a` gives the identity network the same
        # kernel as `ABRelu(a, a)` on unscaled inputs.
        kernels_id = kernel_fn_id(X0_1 * a, None if X0_2 is None else a * X0_2)
        kernels_ab_relu = kernel_fn_ab_relu(X0_1, X0_2)
        # Manually correct the value of `is_gaussian` because
        # `ab_relu` (incorrectly) sets `is_gaussian=False` when `a==b`.
        kernels_ab_relu = kernels_ab_relu.replace(is_gaussian=True)
        self.assertAllClose(kernels_id, kernels_ab_relu)

  def test_leaky_relu(self, same_inputs, do_stabilize):
    """`ABRelu(alpha, 1)` must agree with `LeakyRelu(alpha)`."""
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (3, 2))
    fc = stax.Dense(5, 1, 0)
    X0_2 = None if same_inputs else random.normal(key, (4, 2))

    # Test that ABRelu(alpha, 1) == LeakyRelu(alpha)
    for a in [-2, -1, 0, 1, 2]:
      with self.subTest(alpha=a):
        init_fn, apply_leaky_relu, kernel_fn_leaky_relu = stax.serial(
            fc, stax.LeakyRelu(a, do_stabilize=do_stabilize))
        _, apply_ab_relu, kernel_fn_ab_relu = stax.serial(fc, stax.ABRelu(a, 1))

        _, params = init_fn(key, input_shape=X0_1.shape)
        X1_1_leaky_relu = apply_leaky_relu(params, X0_1)
        X1_1_ab_relu = apply_ab_relu(params, X0_1)
        self.assertAllClose(X1_1_leaky_relu, X1_1_ab_relu)

        kernels_leaky_relu = kernel_fn_leaky_relu(X0_1, X0_2)
        kernels_ab_relu = kernel_fn_ab_relu(X0_1, X0_2)
        self.assertAllClose(kernels_leaky_relu, kernels_ab_relu)

  def test_abs(self, same_inputs, do_stabilize):
    """`Abs` must agree with `ABRelu(-1, 1)`."""
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (3, 2))
    fc = stax.Dense(5, 1, 0)
    X0_2 = None if same_inputs else random.normal(key, (4, 2))

    # Test that Abs == ABRelu(-1, 1)
    # NOTE(review): the local name `apply_leaky_relu` is a copy-paste leftover
    # from `test_leaky_relu`; it actually holds the `Abs` apply function.
    init_fn, apply_leaky_relu, kernel_fn_abs = stax.serial(
        fc, stax.Abs(do_stabilize=do_stabilize))
    _, apply_ab_relu, kernel_fn_ab_relu = stax.serial(fc, stax.ABRelu(-1, 1))

    _, params = init_fn(key, input_shape=X0_1.shape)
    X1_1_abs = apply_leaky_relu(params, X0_1)
    X1_1_ab_relu = apply_ab_relu(params, X0_1)
    self.assertAllClose(X1_1_abs, X1_1_ab_relu)

    kernels_abs = kernel_fn_abs(X0_1, X0_2, ('nngp', 'ntk'))
    kernels_ab_relu = kernel_fn_ab_relu(X0_1, X0_2, ('nngp', 'ntk'))
    self.assertAllClose(kernels_abs, kernels_ab_relu)
class AutodiffTest(test_utils.NeuralTangentsTestCase):
  """Tests that kernel functions are differentiable (forward & reverse mode)."""

  @test_utils.product(
      get=[
          'ntk',
          'nngp'
      ],
      same_inputs=[True, False, None],
      phi=[
          stax.Erf,
          stax.Sin,
          stax.Gelu,
          stax.Relu,
          stax.ElementwiseNumerical
      ]
  )
  def test_autodiff(self, get, same_inputs, phi):
    """Checks Taylor expansion, fwd/rev agreement, and argument symmetry."""
    x1 = np.cos(random.normal(random.PRNGKey(1), (3, 1, 2, 3)))
    if same_inputs is None:
      x2 = None
    elif same_inputs is True:
      x2 = x1
    else:
      x2 = np.cos(random.normal(random.PRNGKey(2), (4, 1, 2, 3)))

    name = phi.__name__
    # NOTE(review): 'LeakyRelu' is not in the `phi` product above, so this
    # branch appears dead — kept for safety.
    if name == 'LeakyRelu':
      phi = phi(0.1)
    elif name == 'ElementwiseNumerical':
      phi = phi(fn=np.cos, deg=25)
    else:
      phi = phi()

    _, _, kernel_fn = stax.serial(stax.Dense(1, 2., 0.01), phi,
                                  stax.Dense(1, 2., 0.01), phi)

    def k(x1, x2):
      return kernel_fn(x1, x2, get)

    # Small perturbations for first/second-order directional derivatives.
    dx1 = random.normal(random.PRNGKey(3), x1.shape) * 0.01
    if x2 is None:
      dx2 = None
    else:
      dx2 = random.normal(random.PRNGKey(4), x2.shape) * 0.01

    def dk(x1, x2):
      return jvp(k, (x1, x2), (dx1, dx2))[1]

    def d2k(x1, x2):
      return jvp(dk, (x1, x2), (dx1, dx2))[1]

    _dk = dk(x1, x2)
    _d2k = d2k(x1, x2)

    # Looser tolerances for non-smooth / numerically-integrated activations.
    if same_inputs is not False and get == 'ntk' and 'Relu' in name:
      tol = 8e-3
    else:
      tol = 2e-3 if name == 'ElementwiseNumerical' else 1e-4

    def assert_close(x, y, tol=3e-5):
      # Relative error normalized by the mean magnitudes of both arguments.
      if default_backend() == 'tpu':
        # TODO(romann): understand why TPUs have high errors.
        tol = 0.21
      self.assertLess(
          np.max(np.abs(x - y)) / (np.mean(np.abs(x)) + np.mean(np.abs(y))),
          tol)

    # k(x + dx) ~ k(x) + dk(x) dx + dx^T d2k(x) dx
    assert_close(k(x1 + dx1, None if same_inputs is None else x2 + dx2),
                 k(x1, x2) + _dk + _d2k / 2,
                 tol=tol)

    # d/dx1
    k_fwd_0 = jacfwd(k)(x1, x2)
    k_rev_0 = jacrev(k)(x1, x2)
    assert_close(k_fwd_0, k_rev_0)

    if same_inputs is not None:
      # d/dx2
      k_fwd_1 = jacfwd(k, 1)(x1, x2)
      k_rev_1 = jacrev(k, 1)(x1, x2)
      assert_close(k_fwd_1, k_rev_1)

      # dk(x2, x1)/dx2 = dk(x1, x2)/dx1
      k_fwd_01 = jacfwd(k, 1)(x2, x1)
      k_rev_01 = jacrev(k, 1)(x2, x1)
      assert_close(np.moveaxis(k_fwd_0, (0, 2, 4), (1, 3, 5)), k_fwd_01)
      assert_close(np.moveaxis(k_rev_0, (0, 2, 4), (1, 3, 5)), k_rev_01)

      # dk(x2, x1)/dx1 = dk(x1, x2)/dx2
      k_fwd_10 = jacfwd(k)(x2, x1)
      k_rev_10 = jacrev(k)(x2, x1)
      assert_close(np.moveaxis(k_fwd_1, (0, 2, 4), (1, 3, 5)), k_fwd_10)
      assert_close(np.moveaxis(k_rev_1, (0, 2, 4), (1, 3, 5)), k_rev_10)

  @test_utils.product(
      get=[
          'ntk',
          'nngp'
      ],
      parameterization=[
          'standard',
          'ntk'
      ],
      parameterization_out=[
          'ntk'
      ],
      do_jit=[
          True,
      ],
      x1_type=[
          'zeros',
          'ones',
          'random',
      ],
      x2_type=[
          'zeros',
          'ones',
          'random',
          'x1',
          'none',
      ],
      b_std=[
          None,
          0.1,
      ],
      phi=[
          stax.Identity,
          stax.Erf,
          stax.Abs,
          stax.Gelu,
          stax.Relu,
          stax.Sigmoid_like,
          stax.ABRelu,
          stax.Exp,
          stax.ExpNormalized,
          stax.Gaussian,
          stax.Sign,
          stax.Rbf,
          stax.Cos,
          stax.Sin
      ]
  )
  def test_activations(
      self,
      get,
      parameterization,
      parameterization_out,
      x1_type,
      x2_type,
      b_std,
      phi,
      do_jit
  ):
    """Tests forward- and reverse-mode autodiff for nonlinearities."""
    if phi == stax.ABRelu:
      phi_ = phi(0.25, 0.5)
    else:
      phi_ = phi()

    # Only `Relu` runs on all backends; the rest are gated by `skip_test`.
    if phi not in [stax.Relu]:
      test_utils.skip_test(self)

    n_out = 1 if get == 'ntk' else 1024
    width = 612
    # NOTE(review): the condition uses `parameterization_out`, which is only
    # ever 'ntk' above, so the 'standard' branch is currently dead — confirm
    # whether `parameterization` was intended.
    W_std_in = width**(-0.5) if parameterization_out == 'standard' else 1.
    if phi == stax.Exp:
      # Keep pre-activations small to avoid overflow in `Exp`.
      W_std_in /= 10.

    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(
            width,
            W_std=W_std_in,
            b_std=b_std,
            parameterization=parameterization),
        phi_,
        stax.Dense(
            n_out,
            b_std=b_std,
            parameterization=parameterization_out
        ),
    )

    def get_x(x_type, key):
      # Builds a (1, 2) input of the requested flavor; zeros/ones probe
      # differentiability at degenerate points.
      shape = (1, 2)
      if x_type == 'zeros':
        x = np.zeros(shape)
      elif x_type == 'ones':
        x = np.ones(shape)
      elif x_type == 'random':
        x = random.normal(random.PRNGKey(key), shape)
      elif x_type == 'sin':
        x = np.sin(random.normal(random.PRNGKey(key), shape))
      elif x_type == 'none':
        return None
      else:
        raise ValueError(x_type)
      return x

    x1 = get_x(x1_type, 1)
    if x2_type == 'x1':
      x2 = x1
    else:
      x2 = get_x(x2_type, 2)

    def kernel_scalar(x1, x2):
      return kernel_fn(x1, x2, get)[0, 0]

    if do_jit:
      kernel_scalar = jit(kernel_scalar)

    # Forward pass must match `value_and_grad`'s value.
    k1 = kernel_scalar(x1, x2)
    k2, k2_grad = value_and_grad(kernel_scalar)(x1, x2)
    self.assertAllClose(k1, k2)

    # Compare to forward-mode.
    k2_fwd, _ = jvp(kernel_scalar, (x1, x2), (x1, x2))
    k2_grad_fwd = jacfwd(kernel_scalar)(x1, x2)
    self.assertAllClose(k1, k2_fwd)
    self.assertAllClose(k2_grad, k2_grad_fwd)

    # `stax.ExpNormalized` has no forward pass.
    # `stax.Sign` is discontinuous at `0`, so NTK MC kernel does not converge to
    # infinite-width kernel.
    if phi == stax.ExpNormalized or (get == 'ntk' and phi == stax.Sign):
      raise absltest.SkipTest('Not comparing against MC kernels.')

    _kernel_scalar_mc = nt.monte_carlo_kernel_fn(
        init_fn,
        apply_fn,
        key=random.PRNGKey(3),
        n_samples=1,
        device_count=0,
    )

    def kernel_scalar_mc(x1, x2):
      return _kernel_scalar_mc(x1, x2, get)[0, 0]

    # MC kernel must also be consistent under `value_and_grad`.
    k_mc = kernel_scalar_mc(x1, x2)
    k_mc2, k_mc2_grad = value_and_grad(kernel_scalar_mc)(x1, x2)
    self.assertAllClose(k_mc, k_mc2)

    # Compare MC to forward-mode.
    k_mc2_fwd, _ = jvp(kernel_scalar_mc, (x1, x2), (x1, x2))
    k_mc2_grad_fwd = jacfwd(kernel_scalar_mc)(x1, x2)
    self.assertAllClose(k_mc, k_mc2_fwd)
    self.assertAllClose(k_mc2_grad, k_mc2_grad_fwd)

    def kernel_fn_emp(x1, x2, get, params):
      return nt.empirical_kernel_fn(apply_fn)(x1, x2, get, params)[0, 0]

    kernel_fn_emp_g = jit(value_and_grad(kernel_fn_emp), static_argnames='get')

    def kernel_scalar_mc_grad_mean(x1, x2):
      # Averages the empirical kernel and its gradient over 2**9 random
      # parameter draws to approximate the infinite-width quantities.
      key = random.PRNGKey(4)
      n_samples = 2**9
      k, k_grad = 0., 0.
      for _ in range(n_samples):
        _, params = init_fn(key, x1.shape)
        k_mc2, k_mc2_grad = kernel_fn_emp_g(x1, x2, get, params)
        k += k_mc2
        k_grad += k_mc2_grad
        key, _ = random.split(key)
      k /= n_samples
      k_grad /= n_samples
      return k, k_grad

    k_mc2_mean, k_mc2_grad_mean = kernel_scalar_mc_grad_mean(x1, x2)

    # Compare kernels.
    self.assertAllClose(k1, k_mc2_mean, atol=8e-3, rtol=4e-2)

    if phi == stax.Sign and get == 'nngp':
      raise absltest.SkipTest('Derivative of the empirical NNGP of a '
                              'discontinuous function does not converge '
                              'to the derivative of the infinite width NNGP.')

    if (phi in [stax.Abs, stax.Relu, stax.LeakyRelu, stax.ABRelu] and
        get == 'ntk'):
      raise absltest.SkipTest('Derivative of the empirical NTK of a '
                              'non-differentiable function does not converge '
                              'to the derivative of the infinite width NTK.')

    atol = 1e-2
    # Compare gradient of the analytic kernel to empirical kernel.
    if np.max(np.abs(k2_grad - k_mc2_grad_mean)) > atol:
      test_utils.assert_close_matrices(self,
                                       k_mc2_grad_mean,
                                       k2_grad,
                                       rtol=0.05,
                                       atol=10.)

  @test_utils.product(
      architecture=[
          'conv',
          'wrn'
      ],
      get=[
          'ntk',
          'nngp'
      ],
      do_jit=[
          True,
      ]
  )
  def test_issue_123(
      self,
      get,
      architecture,
      do_jit
  ):
    """Tests https://github.com/google/neural-tangents/issues/123."""
    if architecture == 'wrn':
      # https://github.com/google/neural-tangents/issues/123#issue-992927376
      def WideResnetBlock(channels, strides=(1, 1), channel_mismatch=False):
        # Residual block: two Relu+Conv stages plus an (optionally projected)
        # shortcut, summed at the end.
        main = stax.serial(
            stax.Relu(),
            stax.Conv(
                channels, (3, 3), strides, padding='SAME',
                parameterization='standard'
            ),
            stax.Relu(),
            stax.Conv(channels, (3, 3), padding='SAME',
                      parameterization='standard'),
        )
        shortcut = (
            stax.Identity()
            if not channel_mismatch
            else stax.Conv(
                channels, (3, 3), strides, padding='SAME',
                parameterization='standard'
            )
        )
        return stax.serial(stax.FanOut(2), stax.parallel(main, shortcut),
                           stax.FanInSum())

      def WideResnetGroup(n, channels, strides=(1, 1)):
        blocks = []
        blocks += [WideResnetBlock(channels, strides, channel_mismatch=True)]
        for _ in range(n - 1):
          blocks += [WideResnetBlock(channels, (1, 1))]
        return stax.serial(*blocks)

      def WideResnet(block_size, k, num_classes):
        return stax.serial(
            stax.Conv(16, (3, 3), padding='SAME', parameterization='standard'),
            WideResnetGroup(block_size, int(16 * k)),
            WideResnetGroup(block_size, int(32 * k), (2, 2)),
            WideResnetGroup(block_size, int(64 * k), (2, 2)),
            stax.AvgPool((8, 8), padding='SAME'),
            stax.Flatten(),
            stax.Dense(num_classes, 1.0, 0.0, parameterization='standard'),
        )

      init_fn, apply_fn, kernel_fn = WideResnet(block_size=1,
                                                k=1,
                                                num_classes=1)
    elif architecture == 'conv':
      # https://github.com/google/neural-tangents/issues/123#issuecomment-932809224
      init_fn, apply_fn, kernel_fn = stax.serial(
          stax.Conv(
              1,
              (3, 3)
          ),
          stax.Relu(),
          stax.Flatten(),
      )
    else:
      raise ValueError(architecture)

    # All-zero inputs: the reported issue was a NaN gradient at zero.
    x1 = x2 = np.zeros((1, 8, 8, 3))

    def kernel_scalar(x1, x2):
      return kernel_fn(x1, x2, get)[0, 0]

    if do_jit:
      kernel_scalar = jit(kernel_scalar)

    # Compare forward pass to `value_and_grad`.
    k1 = kernel_scalar(x1, x2)
    k2, k2_grad = value_and_grad(kernel_scalar)(x1, x2)
    self.assertAllClose(k1, k2)

    # Compare to forward-mode.
    k2_fwd, _ = jvp(kernel_scalar, (x1, x2), (x1, x2))
    k2_grad_fwd = jacfwd(kernel_scalar)(x1, x2)
    self.assertAllClose(k1, k2_fwd)
    self.assertAllClose(k2_grad, k2_grad_fwd)

    # Compare to 0.
    self.assertAllClose(grad(kernel_scalar)(x1, x2), np.zeros_like(x1))
# Run all tests via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 31,260 | 29.058654 | 83 | py |
neural-tangents | neural-tangents-main/tests/stax/stax_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/stax.py`."""
import functools
import itertools
import random as prandom
from typing import Tuple
from absl.testing import absltest
from jax import default_backend
from jax import jit
from jax import random
from jax.config import config
from jax.example_libraries import stax as ostax
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
from tests import test_utils
import numpy as onp
config.parse_flags_with_absl()
# Surface silent rank-promotion bugs as errors throughout the test suite.
config.update('jax_numpy_rank_promotion', 'raise')

# Model families exercised by the parameterized tests below.
MODELS = [
    'fc',
    'conv'
]

BATCH_SIZE = 4

# Canonical NHWC input shape; `_get_net` permutes it per random `spec`.
INPUT_SHAPE = (BATCH_SIZE, 8, 6, 2)

WIDTHS = [2**10]

# Number of Monte Carlo samples for empirical-kernel comparisons.
N_SAMPLES = 100

# Default tolerances for analytic-vs-empirical kernel comparisons.
RTOL = 0.041
ATOL = 0.1

FILTER_SHAPES = [
    (2, 1),
    (3, 2)
]

PADDINGS = [
    'SAME',
    'VALID',
    'CIRCULAR'
]

STRIDES = [
    (1, 2),
    (2, 1),
]

ACTIVATIONS = [stax.Relu()]

# Ways of projecting intermediate conv feature maps down to 2D outputs.
PROJECTIONS = [
    'FLAT',
    'POOL',
    'ATTN',
]

test_utils.update_test_tolerance()

# Fix Python's RNG: `_get_net` uses `prandom.choice` for layout specs, so
# seeding keeps test-case layouts reproducible.
prandom.seed(1)
def _get_inputs(
    key,
    same_inputs,
    shape,
    fn=np.cos
) -> Tuple[np.ndarray, np.ndarray]:
  """Draws a pair of test inputs; `x2` is `None` when `same_inputs`.

  `x2` has twice the batch size of `x1` (along the batch axis) and is scaled
  by 2 so the two inputs are clearly distinct.
  """
  key, subkey = random.split(key)
  x1 = fn(random.normal(key, shape))
  b = shape.index(BATCH_SIZE)
  x2_shape = shape[:b] + (2 * BATCH_SIZE,) + shape[b + 1:]
  if same_inputs:
    x2 = None
  else:
    x2 = fn(random.normal(subkey, x2_shape)) * 2
  return x1, x2  # pytype: disable=bad-return-type  # jax-ndarray
def _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res, padding,
             phi, strides, width, is_ntk, proj_into_2d, pool_type, layer_norm,
             parameterization, s, use_dropout):
  """Builds a randomized test network.

  Returns a tuple `(net, input_shape, device_count, channel_axis_fc)` where
  `net` is an `(init_fn, apply_fn, kernel_fn)` triple. Dimension orders
  (`spec`, `filter_spec`) are chosen at random via the module-seeded
  `prandom` RNG so different layouts get exercised across test cases.
  """
  if is_conv:
    # Select a random filter order.
    default_filter_spec = 'HW'
    filter_specs = [''.join(p) for p in itertools.permutations('HWIO')]
    filter_spec = prandom.choice(filter_specs)
    filter_shape = tuple(filter_shape[default_filter_spec.index(c)]
                         for c in filter_spec if c in default_filter_spec)
    strides = tuple(strides[default_filter_spec.index(c)]
                    for c in filter_spec if c in default_filter_spec)

    # Select the activation order.
    default_spec = 'NHWC'
    if default_backend() == 'tpu':
      # Keep batch dimension leading for TPU for batching to work.
      specs = ['N' + ''.join(p) for p in itertools.permutations('CHW')]
    else:
      specs = [''.join(p) for p in itertools.permutations('NCHW')]
    spec = prandom.choice(specs)
    input_shape = tuple(INPUT_SHAPE[default_spec.index(c)] for c in spec)
  else:
    # FC case: flatten all non-batch dimensions into one feature axis.
    input_shape = (INPUT_SHAPE[0], onp.prod(INPUT_SHAPE[1:]))
    if default_backend() == 'tpu':
      spec = 'NC'
    else:
      spec = prandom.choice(['NC', 'CN'])
    if spec.index('N') == 1:
      input_shape = input_shape[::-1]
    filter_spec = None

  dimension_numbers = (spec, filter_spec, spec)
  batch_axis, channel_axis = spec.index('N'), spec.index('C')

  # Axes after projecting down to a 2D (batch, channel) output.
  spec_fc = ''.join(c for c in spec if c in ('N', 'C'))
  batch_axis_fc, channel_axis_fc = spec_fc.index('N'), spec_fc.index('C')
  if not is_conv:
    batch_axis = batch_axis_fc
    channel_axis = channel_axis_fc

  if layer_norm:
    # Translate axis names (e.g. 'NC') into positional indices under `spec`.
    layer_norm = tuple(spec.index(c) for c in layer_norm)

  def fc(out_dim, s):
    return stax.Dense(
        out_dim=out_dim,
        W_std=W_std,
        b_std=b_std,
        parameterization=parameterization,
        s=s,
        batch_axis=batch_axis_fc,
        channel_axis=channel_axis_fc
    )

  def conv(out_chan, s):
    return stax.Conv(
        out_chan=out_chan,
        filter_shape=filter_shape,
        strides=strides,
        padding=padding,
        W_std=W_std,
        b_std=b_std,
        dimension_numbers=dimension_numbers,
        parameterization=parameterization,
        s=s
    )

  affine = conv(width, (s, s)) if is_conv else fc(width, (s, s))
  affine_bottom = conv(width, (1, s)) if is_conv else fc(width, (1, s))

  rate = onp.random.uniform(0.5, 0.9)
  dropout = stax.Dropout(rate, mode='train')

  if pool_type == 'AVG':
    pool_fn = stax.AvgPool
    global_pool_fn = stax.GlobalAvgPool
  elif pool_type == 'SUM':
    pool_fn = stax.SumPool
    global_pool_fn = stax.GlobalSumPool
  else:
    raise ValueError(pool_type)

  if use_pooling:
    pool_or_identity = pool_fn((2, 3),
                               None,
                               'SAME' if padding == 'SAME' else 'CIRCULAR',
                               batch_axis=batch_axis,
                               channel_axis=channel_axis)
  else:
    pool_or_identity = stax.Identity()
  dropout_or_identity = dropout if use_dropout else stax.Identity()
  layer_norm_or_identity = (stax.Identity() if layer_norm is None else
                            stax.LayerNorm(axis=layer_norm,
                                           batch_axis=batch_axis,
                                           channel_axis=channel_axis))
  res_unit = stax.serial(dropout_or_identity, affine, pool_or_identity)
  if is_res:
    # Residual topology: identity shortcut around `res_unit`.
    block = stax.serial(
        affine_bottom,
        stax.FanOut(2),
        stax.parallel(stax.Identity(),
                      res_unit),
        stax.FanInSum(),
        layer_norm_or_identity,
        phi)
  else:
    block = stax.serial(
        affine_bottom,
        res_unit,
        layer_norm_or_identity,
        phi)

  if proj_into_2d == 'FLAT':
    proj_layer = stax.Flatten(batch_axis, batch_axis_fc)
  elif proj_into_2d == 'POOL':
    proj_layer = global_pool_fn(batch_axis, channel_axis)
  elif proj_into_2d.startswith('ATTN'):
    n_heads = int(np.sqrt(width))
    n_chan_val = int(np.round(float(width) / n_heads))
    proj_layer = stax.serial(
        stax.GlobalSelfAttention(
            n_chan_out=width,
            n_chan_key=width,
            n_chan_val=n_chan_val,
            n_heads=n_heads,
            linear_scaling=True,
            W_key_std=W_std,
            W_value_std=W_std,
            W_query_std=W_std,
            W_out_std=1.0,
            b_std=b_std,
            batch_axis=batch_axis,
            channel_axis=channel_axis),
        stax.Flatten(batch_axis, batch_axis_fc))
  else:
    raise ValueError(proj_into_2d)
  readout = stax.serial(proj_layer,
                        fc(1 if is_ntk else width, (s, 1 if is_ntk else s)))

  # Parallelize over devices only when the batch axis leads.
  device_count = -1 if spec.index('N') == 0 else 0

  net = stax.serial(block, readout)
  return net, input_shape, device_count, channel_axis_fc
def _get_net_pool(width, is_ntk, pool_type, padding,
                  filter_shape, strides, normalize_edges):
  """Builds a conv-pool-conv-pool network for pooling tests.

  Returns `(net, input_shape, device_count, channel_axis)` matching the
  contract of `_get_net` (channel axis `-1`, i.e. trailing).
  """
  W_std, b_std = 2.**0.5, 0.5**0.5
  phi = stax.Relu()
  parameterization = 'ntk'

  # NOTE(review): for SUM pooling, W_std is divided by the fan-in growth
  # (width / filter size) — presumably to keep variances comparable to the
  # AVG-pooling case; confirm against the SumPool kernel scaling.
  fc = functools.partial(
      stax.Dense,
      W_std=W_std / width if pool_type == 'SUM' else W_std,
      b_std=b_std,
      parameterization=parameterization)

  conv = functools.partial(
      stax.Conv,
      filter_shape=filter_shape,
      strides=None,
      padding='SAME',
      W_std=W_std / onp.prod(filter_shape) if pool_type == 'SUM' else W_std,
      b_std=b_std,
      parameterization=parameterization)

  if pool_type == 'AVG':
    pool_fn = functools.partial(stax.AvgPool, normalize_edges=normalize_edges)
    global_pool_fn = stax.GlobalAvgPool
  elif pool_type == 'SUM':
    pool_fn = stax.SumPool
    global_pool_fn = stax.GlobalSumPool
  else:
    raise ValueError(pool_type)

  pool = pool_fn(filter_shape, strides, padding)

  # Batch axis leads, so parallelize over all available devices.
  device_count = -1

  return stax.serial(
      conv(width),
      phi,
      pool,
      conv(width),
      phi,
      global_pool_fn(),
      fc(1 if is_ntk else width)
  ), INPUT_SHAPE, device_count, -1
def _check_agreement_with_empirical(
    self,
    net,
    same_inputs,
    use_dropout,
    is_ntk,
    rtol=RTOL,
    atol=ATOL
):
  """Asserts the analytic kernel of `net` matches a Monte Carlo estimate.

  Also checks that the kernel's reported output shapes (`shape1`/`shape2`)
  agree with the shapes produced by `init_fn`.

  Args:
    self: test case instance (plain helper, not a method).
    net: `((init_fn, apply_fn, kernel_fn), input_shape, device_count,
      channel_axis)` as produced by `_get_net` / `_get_net_pool`.
    same_inputs: whether `x2` is `None`.
    use_dropout: if `True`, uses 5x more MC samples to tame dropout noise.
    is_ntk: compare the NTK if `True`, else the NNGP.
    rtol: relative tolerance for the kernel comparison.
    atol: absolute tolerance for the kernel comparison.
  """
  ((init_fn, apply_fn, kernel_fn),
   input_shape, device_count, channel_axis) = net

  num_samples = N_SAMPLES * 5 if use_dropout else N_SAMPLES
  key = random.PRNGKey(1)
  x1, x2 = _get_inputs(key, same_inputs, input_shape)
  if default_backend() == 'tpu' and use_dropout:
    # including a test case for tpu + dropout with (parallel + batching)
    batch_size = 2
  else:
    batch_size = 0
  x1_out_shape, params = init_fn(key, x1.shape)
  if same_inputs:
    assert x2 is None
  if x2 is None:
    x2_out_shape = x1_out_shape
  else:
    x2_out_shape, params = init_fn(key, x2.shape)
  del params

  def _get_empirical(n_samples, get):
    kernel_fn_empirical = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=apply_fn,
        key=key,
        n_samples=n_samples,
        device_count=device_count,
        trace_axes=(channel_axis,),
        batch_size=batch_size,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    if same_inputs:
      assert x2 is None
    return kernel_fn_empirical(x1, x2, get)

  if is_ntk:
    exact, shape1, shape2 = kernel_fn(x1, x2, ('ntk', 'shape1', 'shape2'))
    empirical = _get_empirical(num_samples, 'ntk')
  else:
    exact, shape1, shape2 = kernel_fn(x1, x2, ('nngp', 'shape1', 'shape2'))
    empirical = _get_empirical(num_samples, 'nngp')
  test_utils.assert_close_matrices(self, exact, empirical, rtol, atol)
  self.assertEqual(shape1, x1_out_shape)
  self.assertEqual(shape2, x2_out_shape)
class StaxTest(test_utils.NeuralTangentsTestCase):
  def _skip_test(self, filter_shape, is_conv, is_res, padding, proj_into_2d,
                 strides, use_pooling):
    """Skips duplicate or ill-shaped parameter combinations.

    Conv configs are backend-gated via `test_utils.skip_test`; residual conv
    configs whose two paths would disagree in shape, and FC configs that only
    differ in conv-specific parameters, are skipped outright.
    """
    if is_conv:
      test_utils.skip_test(self)

      # Strided or VALID-padded paths change the spatial shape, so a residual
      # sum of the two branches would be ill-defined.
      if (is_res and is_conv and ((strides is not None and strides != (1, 1)) or
                                  (padding == 'VALID' and filter_shape !=
                                   (1, 1)))):
        raise absltest.SkipTest('Different paths in a residual models need to '
                                'return outputs of the same shape.')
    elif (filter_shape != FILTER_SHAPES[0] or padding != PADDINGS[0] or
          strides != STRIDES[0] or proj_into_2d != PROJECTIONS[0] or
          use_pooling):
      raise absltest.SkipTest('FC models do not have these parameters.')
  @test_utils.product(
      model=MODELS,
      width=WIDTHS,
      phi=ACTIVATIONS,
      same_inputs=[False],
      padding=PADDINGS,
      strides=STRIDES,
      filter_shape=FILTER_SHAPES,
      use_pooling=[False, True],
      is_ntk=[False, True],
      is_res=[False, True],
      proj_into_2d=PROJECTIONS,
  )
  def test_exact(
      self,
      model,
      width,
      strides,
      padding,
      phi,
      same_inputs,
      filter_shape,
      use_pooling,
      is_ntk,
      is_res,
      proj_into_2d
  ):
    """Analytic NNGP/NTK of randomized networks vs. Monte Carlo estimates."""
    is_conv = 'conv' in model

    # Check for duplicate / incorrectly-shaped NN configs / wrong backend.
    self._skip_test(filter_shape, is_conv, is_res, padding, proj_into_2d,
                    strides, use_pooling)

    pool_type = 'AVG'
    W_std, b_std = 2.**0.5, 0.5**0.5
    layer_norm = None
    parameterization = 'ntk'
    use_dropout = False

    net = _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res,
                   padding, phi, strides, width, is_ntk, proj_into_2d,
                   pool_type, layer_norm, parameterization, 1, use_dropout)
    # NOTE(review): a loose atol of 1.2 overrides the module default here —
    # presumably to accommodate high-variance configurations.
    _check_agreement_with_empirical(
        self, net, same_inputs, use_dropout, is_ntk, RTOL, 1.2)
  @test_utils.product(
      model=MODELS,
      width=WIDTHS,
      same_inputs=[False],
      is_ntk=[False, True],
      proj_into_2d=PROJECTIONS[:2],
      layer_norm=[
          'C',
          'HC',
          'CHW',
          'NC',
          'NWC',
          'NCHW'
      ],
  )
  def test_layernorm(
      self,
      model,
      width,
      same_inputs,
      is_ntk,
      proj_into_2d,
      layer_norm
  ):
    """LayerNorm kernels (over various axis sets) vs. Monte Carlo estimates."""
    is_conv = 'conv' in model
    # Check for duplicate / incorrectly-shaped NN configs / wrong backend.
    if is_conv:
      test_utils.skip_test(self)
    elif proj_into_2d != PROJECTIONS[0] or layer_norm not in ('C', 'NC'):
      # FC inputs are 2D, so only 'C' / 'NC' normalization axes apply.
      raise absltest.SkipTest('FC models do not have these parameters.')

    W_std, b_std = 2.**0.5, 0.5**0.5
    filter_shape = FILTER_SHAPES[0]
    padding = PADDINGS[0]
    strides = STRIDES[0]
    phi = stax.Relu()
    use_pooling, is_res = False, False
    parameterization = 'ntk'
    pool_type = 'AVG'
    use_dropout = False

    net = _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res,
                   padding, phi, strides, width, is_ntk, proj_into_2d,
                   pool_type, layer_norm, parameterization, 1, use_dropout)
    _check_agreement_with_empirical(self, net, same_inputs, use_dropout, is_ntk,
                                    0.07)
  @test_utils.product(
      width=WIDTHS,
      same_inputs=[False],
      is_ntk=[False, True],
      pool_type=[
          'SUM',
          'AVG'
      ],
      padding=PADDINGS,
      filter_shape=FILTER_SHAPES,
      strides=STRIDES,
      normalize_edges=[True, False]
  )
  def test_pool(
      self,
      width,
      same_inputs,
      is_ntk,
      pool_type,
      padding,
      filter_shape,
      strides,
      normalize_edges
  ):
    """Checks kernels of networks with `SumPool` / `AvgPool` layers."""
    use_dropout = False
    # Check for duplicate / incorrectly-shaped NN configs / wrong backend.
    test_utils.skip_test(self)
    if pool_type == 'SUM' and normalize_edges:
      # `normalize_edges` only makes sense for average pooling.
      raise absltest.SkipTest('normalize_edges not applicable to SumPool.')
    net = _get_net_pool(width, is_ntk, pool_type,
                        padding, filter_shape, strides, normalize_edges)
    _check_agreement_with_empirical(self, net, same_inputs, use_dropout, is_ntk)
def test_avg_pool(self):
X1 = np.ones((4, 2, 3, 2))
X2 = np.ones((3, 2, 3, 2))
_, apply_fn, kernel_fn = stax.AvgPool((2, 2), (1, 1), 'SAME',
normalize_edges=False)
_, apply_fn_norm, kernel_fn_norm = stax.AvgPool((2, 2), (1, 1), 'SAME',
normalize_edges=True)
_, apply_fn_stax = ostax.AvgPool((2, 2), (1, 1), 'SAME')
out1 = apply_fn((), X1)
out2 = apply_fn((), X2)
out1_norm = apply_fn_norm((), X1)
out2_norm = apply_fn_norm((), X2)
out1_stax = apply_fn_stax((), X1)
out2_stax = apply_fn_stax((), X2)
self.assertAllClose((out1_stax, out2_stax), (out1_norm, out2_norm))
out_unnorm = np.array([[1., 1., 0.5], [0.5, 0.5, 0.25]]).reshape(
(1, 2, 3, 1))
out1_unnormalized = np.broadcast_to(out_unnorm, X1.shape)
out2_unnormalized = np.broadcast_to(out_unnorm, X2.shape)
self.assertAllClose((out1_unnormalized, out2_unnormalized), (out1, out2))
ker = kernel_fn(X1, X2)
ker_norm = kernel_fn_norm(X1, X2)
self.assertAllClose(np.ones_like(ker_norm.nngp), ker_norm.nngp)
self.assertAllClose(np.ones_like(ker_norm.cov1), ker_norm.cov1)
self.assertAllClose(np.ones_like(ker_norm.cov2), ker_norm.cov2)
self.assertEqual(ker_norm.nngp.shape, ker.nngp.shape)
self.assertEqual(ker_norm.cov1.shape, ker.cov1.shape)
self.assertEqual(ker_norm.cov2.shape, ker.cov2.shape)
ker_unnorm = np.outer(out_unnorm, out_unnorm).reshape((2, 3, 2, 3))
ker_unnorm = np.transpose(ker_unnorm, axes=(0, 2, 1, 3))
nngp = np.broadcast_to(
ker_unnorm.reshape((1, 1) + ker_unnorm.shape), ker.nngp.shape)
cov1 = np.broadcast_to(np.expand_dims(ker_unnorm, 0), ker.cov1.shape)
cov2 = np.broadcast_to(np.expand_dims(ker_unnorm, 0), ker.cov2.shape)
self.assertAllClose((nngp, cov1, cov2), (ker.nngp, ker.cov1, ker.cov2))
  @test_utils.product(
      model=MODELS,
      width=WIDTHS,
      same_inputs=[True, False],
      phi=ACTIVATIONS,
      padding=['SAME'],
      strides=STRIDES,
      filter_shape=[(2, 1)],
      is_ntk=[True, False],
      use_pooling=[True, False],
      proj_into_2d=['FLAT', 'POOL']
  )
  def test_dropout(
      self,
      model,
      width,
      same_inputs,
      is_ntk,
      padding,
      strides,
      filter_shape,
      phi,
      use_pooling,
      proj_into_2d
  ):
    """Checks kernels of networks containing a `Dropout` layer."""
    pool_type = 'AVG'
    use_dropout = True
    is_conv = 'conv' in model
    is_res = False
    W_std, b_std = 2.**0.5, 0.5**0.5
    layer_norm = None
    parameterization = 'ntk'
    # Check for duplicate / incorrectly-shaped NN configs / wrong backend.
    self._skip_test(filter_shape, is_conv, is_res, padding, proj_into_2d,
                    strides, use_pooling)
    net = _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res,
                   padding, phi, strides, width, is_ntk, proj_into_2d,
                   pool_type, layer_norm, parameterization, 1, use_dropout)
    _check_agreement_with_empirical(self, net, same_inputs, use_dropout, is_ntk)
  @test_utils.product(
      act=['erf', 'relu'],
      do_stabilize=[True, False],
      kernel=['nngp', 'ntk']
  )
  def test_sparse_inputs(self, act, kernel, do_stabilize):
    """Checks kernel agreement when the leading input rows are all zeros.

    The exact kernel must remain finite (no NaNs) on zero inputs, and the
    dense sub-block must agree with the Monte Carlo estimate.
    """
    if do_stabilize and act != 'relu':
      raise absltest.SkipTest('Stabilization possible only in Relu.')
    key = random.PRNGKey(1)
    input_count = 4
    sparse_count = 2
    input_size = 3
    width = 1024
    # NOTE(schsam): It seems that convergence is slower when inputs are sparse.
    samples = N_SAMPLES
    if default_backend() == 'gpu':
      tol = 5e-4
      samples = 100 * N_SAMPLES
    else:
      # Dtype-keyed tolerances; presumably the assertion helper resolves the
      # dict by the arrays' dtype — TODO confirm.
      tol = {onp.dtype(onp.float32): 5e-2, onp.dtype(onp.float64): 5e-3}
    # a batch of dense inputs
    x_dense = random.normal(key, (input_count, input_size))
    # Zero out the first `sparse_count` rows to make them "sparse".
    x_sparse = x_dense.at[:sparse_count, :].set(0.)
    activation = (stax.Relu(do_stabilize=do_stabilize) if act == 'relu'
                  else stax.Erf())
    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Dense(width),
        activation,
        stax.Dense(1 if kernel == 'ntk' else width))
    exact = kernel_fn(x_sparse, None, kernel)
    mc = nt.monte_carlo_kernel_fn(
        init_fn,
        apply_fn,
        random.split(key, 2)[0],
        samples,
        vmap_axes=0,
        device_count=-1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )(x_sparse, None, kernel)
    mc = np.reshape(mc, exact.shape)
    # The exact kernel must be finite even on the all-zero rows.
    assert not np.any(np.isnan(exact))
    # Only the dense-vs-dense sub-block is compared against Monte Carlo.
    self.assertAllClose(exact[sparse_count:, sparse_count:],
                        mc[sparse_count:, sparse_count:],
                        rtol=tol, atol=tol)
def test_composition_dense(self):
rng = random.PRNGKey(0)
x1 = random.normal(rng, (2, 3))
x2 = random.normal(rng, (4, 3))
Block = stax.serial(stax.Dense(256), stax.Relu())
_, _, ker_fn = Block
_, _, composed_ker_fn = stax.serial(Block, Block)
ker_out = ker_fn(ker_fn(x1))
composed_ker_out = composed_ker_fn(x1)
self.assertAllClose(ker_out, composed_ker_out)
ker_out = ker_fn(ker_fn(x1, x2))
composed_ker_out = composed_ker_fn(x1, x2)
self.assertAllClose(ker_out, composed_ker_out)
@test_utils.product(
avg_pool=[True, False],
same_inputs=[True, False]
)
def test_composition_conv(self, avg_pool, same_inputs):
rng = random.PRNGKey(0)
x1 = random.normal(rng, (3, 5, 5, 3))
x2 = None if same_inputs else random.normal(rng, (4, 5, 5, 3))
Block = stax.serial(stax.Conv(256, (3, 3)), stax.Relu())
if avg_pool:
Readout = stax.serial(stax.Conv(256, (3, 3)),
stax.GlobalAvgPool(),
stax.Dense(10))
else:
Readout = stax.serial(stax.Flatten(), stax.Dense(10))
block_ker_fn, readout_ker_fn = Block[2], Readout[2]
_, _, composed_ker_fn = stax.serial(Block, Readout)
composed_ker_out = composed_ker_fn(x1, x2)
ker_out_no_marg = readout_ker_fn(block_ker_fn(x1, x2,
diagonal_spatial=False))
ker_out_default = readout_ker_fn(block_ker_fn(x1, x2))
self.assertAllClose(composed_ker_out, ker_out_no_marg)
self.assertAllClose(composed_ker_out, ker_out_default)
if avg_pool:
with self.assertRaises(ValueError):
ker_out = readout_ker_fn(block_ker_fn(x1, x2, diagonal_spatial=True))
else:
ker_out_marg = readout_ker_fn(block_ker_fn(x1, x2,
diagonal_spatial=True))
self.assertAllClose(composed_ker_out, ker_out_marg)
class ParameterizationTest(test_utils.NeuralTangentsTestCase):
  """Tests `'standard'` vs `'ntk'` parameterization against Monte Carlo."""

  @test_utils.product(
      get=['nngp', 'ntk'],
      s=[2**9, 2**8, 2**7],
      depth=[0, 1, 2],
      same_inputs=[True, False],
      W_std=[0., 1., 2.],
      b_std=[None, 0., 0.5**0.5, 2],
      parameterization=['ntk', 'standard']
  )
  def test_linear(
      self,
      get,
      s,
      depth,
      same_inputs,
      b_std,
      W_std,
      parameterization,
  ):
    """Deep linear stacks with varying `s` and layer-dependent W/b stds."""
    if parameterization == 'standard':
      # Keep total parameter scale constant as `s` varies.
      width = 2**9 // s
    elif parameterization == 'ntk':
      if s != 2**9:
        raise absltest.SkipTest(
            '"ntk" parameterization does not depend on "s".')
      width = 2**10
    else:
      raise ValueError(parameterization)
    layers = []
    for i in range(depth + 1):
      # First layer reads raw inputs (s_in=1); the NTK readout is scalar.
      s_in = 1 if i == 0 else s
      s_out = 1 if (i == depth and get == 'ntk') else s
      out_dim = 1 if (i == depth and get == 'ntk') else width * (i + 1)
      layers += [stax.Dense(out_dim,
                            W_std=W_std / (i + 1),
                            b_std=b_std if b_std is None else b_std / (i + 1),
                            parameterization=parameterization,
                            s=(s_in, s_out))]
    net = stax.serial(*layers)
    # Pack as (net, input_shape, diagonal_axes?, batch?) — matches what
    # `_check_agreement_with_empirical` expects; TODO confirm field meanings.
    net = net, (BATCH_SIZE, 3), -1, 1
    _check_agreement_with_empirical(
        self, net, same_inputs, False, get == 'ntk', rtol=0.02, atol=10)

  @test_utils.product(
      model=MODELS,
      width=[2**10],
      same_inputs=[False],
      is_ntk=[False, True],
      filter_shape=FILTER_SHAPES,
      proj_into_2d=PROJECTIONS[:2],
      W_std=[0., 1., 2.],
      b_std=[None, 0., 0.5**0.5],
      parameterization=['ntk', 'standard'],
      s=[2**10]
  )
  def test_nonlinear(
      self,
      model,
      width,
      same_inputs,
      is_ntk,
      filter_shape,
      proj_into_2d,
      b_std,
      W_std,
      parameterization,
      s
  ):
    """Nonlinear (ReLU) networks under both parameterizations."""
    is_conv = 'conv' in model
    if parameterization == 'standard':
      # Compensate width by `s` in the standard parameterization.
      width //= s
    padding = PADDINGS[0]
    strides = STRIDES[0]
    phi = stax.Relu()
    use_pooling, is_res = False, False
    layer_norm = None
    pool_type = 'AVG'
    use_dropout = False
    # Check for duplicate / incorrectly-shaped NN configs / wrong backend.
    if is_conv:
      test_utils.skip_test(self)
    elif proj_into_2d != PROJECTIONS[0] or filter_shape != FILTER_SHAPES[0]:
      raise absltest.SkipTest('FC models do not have these parameters.')
    net = _get_net(W_std=W_std,
                   b_std=b_std,
                   filter_shape=filter_shape,
                   is_conv=is_conv,
                   use_pooling=use_pooling,
                   is_res=is_res,
                   padding=padding,
                   phi=phi,
                   strides=strides,
                   width=width,
                   is_ntk=is_ntk,
                   proj_into_2d=proj_into_2d,
                   pool_type=pool_type,
                   layer_norm=layer_norm,
                   parameterization=parameterization,
                   s=s,
                   use_dropout=use_dropout)
    _check_agreement_with_empirical(
        self,
        net=net,
        same_inputs=same_inputs,
        use_dropout=use_dropout,
        is_ntk=is_ntk,
        rtol=0.015,
        atol=1000
    )
class ParallelInOutTest(test_utils.NeuralTangentsTestCase):
  """Tests networks with `parallel` inputs and/or outputs vs Monte Carlo."""

  @test_utils.product(
      same_inputs=[True, False],
      kernel_type=['ntk']
  )
  def test_parallel_in(self, same_inputs, kernel_type):
    """Two parallel input branches summed into a single output head."""
    platform = default_backend()
    rtol = RTOL if platform != 'tpu' else 0.05
    rng = random.PRNGKey(0)
    input_key1, input_key2, mc_key = random.split(rng, 3)
    x1_1, x2_1 = _get_inputs(input_key1, same_inputs, (BATCH_SIZE, 2))
    x1_2, x2_2 = _get_inputs(input_key2, same_inputs, (BATCH_SIZE, 3))
    x1 = (x1_1, x1_2)
    x2 = (x2_1, x2_2)
    N = 2 ** 7

    def net(logits):
      return stax.serial(
          stax.parallel(stax.Dense(N), stax.Dense(N)),
          stax.serial(stax.FanInSum(), stax.Dense(logits)))

    init_fn, apply_fn, kernel_fn = net(N if kernel_type == 'nngp' else 1)
    kernel_fn_empirical = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, mc_key, N_SAMPLES, trace_axes=(-1,),
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=((0, 0), 0, {})
    )
    test_utils.assert_close_matrices(self,
                                     kernel_fn(x1, x2, kernel_type),
                                     kernel_fn_empirical(x1, x2, kernel_type),
                                     rtol)

  @test_utils.product(
      same_inputs=[True, False],
      kernel_type=['ntk']
  )
  def test_parallel_out(self, same_inputs, kernel_type):
    """Single input fanned out into two parallel output heads."""
    platform = default_backend()
    rtol = RTOL if platform != 'tpu' else 0.05
    rng = random.PRNGKey(0)
    input_key1, mc_key = random.split(rng, 2)
    x1, x2 = _get_inputs(input_key1, same_inputs, (BATCH_SIZE, 1))
    N = 2 ** 10

    def net(logits):
      return stax.serial(
          stax.Dense(N),
          stax.FanOut(2),
          stax.parallel(stax.Dense(logits), stax.Dense(logits)))

    init_fn, apply_fn, kernel_fn = net(N if kernel_type == 'nngp' else 1)
    kernel_fn_empirical = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, mc_key, N_SAMPLES, trace_axes=(-1,),
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=(0, [0, 0], {}))
    test_utils.assert_close_matrices(self,
                                     kernel_fn(x1, x2, kernel_type),
                                     kernel_fn_empirical(x1, x2, kernel_type),
                                     rtol)

  @test_utils.product(
      same_inputs=[True, False],
      kernel_type=['ntk']
  )
  def test_parallel_in_out(self, same_inputs, kernel_type):
    """Parallel inputs AND parallel outputs, kernels composed manually."""
    platform = default_backend()
    rtol = RTOL if platform != 'tpu' else 0.05
    rng = random.PRNGKey(0)
    input_key1, input_key2, mc_key = random.split(rng, 3)
    x1_1, x2_1 = _get_inputs(input_key1, same_inputs, (BATCH_SIZE, 1))
    x1_2, x2_2 = _get_inputs(input_key2, same_inputs, (BATCH_SIZE, 2))
    x1 = (x1_1, x1_2)
    x2 = (x2_1, x2_2)
    N_in = 2 ** 10
    N_out = N_in if kernel_type == 'nngp' else 1
    readin = stax.serial(stax.parallel(stax.Dense(N_in), stax.Dense(N_in)),
                         stax.FanInSum())
    readout = stax.serial(stax.FanOut(3),
                          stax.parallel(stax.Dense(N_out),
                                        stax.Dense(N_out + 1),
                                        stax.Dense(N_out + 2)))
    init_fn, apply_fn, _ = stax.serial(readin, readout)
    K_readin_fn = jit(readin[2])
    K_readout_fn = jit(functools.partial(readout[2], get=kernel_type))
    kernel_fn_empirical = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, mc_key, N_SAMPLES, trace_axes=(-1,),
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=((0, 0), [0, 0, 0], {})
    )
    test_utils.assert_close_matrices(
        self,
        K_readout_fn(K_readin_fn(x1, x2)),
        kernel_fn_empirical(x1, x2, get=kernel_type),
        rtol)
    # Check Both (here we just want to make sure we _can_ compute the output).
    K_readin_fn = jit(readin[2])
    K_readout_fn = jit(functools.partial(readout[2], get=('nngp', 'ntk')))
    K_readout_fn(K_readin_fn(x1, x2))

  @test_utils.product(
      same_inputs=[True, False],
      kernel_type=['ntk']
  )
  def test_nested_parallel(self, same_inputs, kernel_type):
    """Nested `parallel` trees with masked inputs of different shapes."""
    platform = default_backend()
    rtol = RTOL if platform != 'tpu' else 0.05
    rng = random.PRNGKey(0)
    (input_key1,
     input_key2,
     input_key3,
     input_key4,
     mask_key,
     mc_key) = random.split(rng, 6)
    x1_1, x2_1 = _get_inputs(input_key1, same_inputs, (BATCH_SIZE, 5))
    x1_2, x2_2 = _get_inputs(input_key2, same_inputs, (BATCH_SIZE, 2, 2, 2))
    x1_3, x2_3 = _get_inputs(input_key3, same_inputs, (BATCH_SIZE, 2, 2, 3))
    x1_4, x2_4 = _get_inputs(input_key4, same_inputs, (BATCH_SIZE, 3, 4))
    m1_key, m2_key, m3_key, m4_key = random.split(mask_key, 4)
    x1_1 = test_utils.mask(
        x1_1, mask_constant=-1, mask_axis=(1,), key=m1_key, p=0.5)
    x1_2 = test_utils.mask(
        x1_2, mask_constant=-1, mask_axis=(2, 3,), key=m2_key, p=0.5)
    if not same_inputs:
      x2_3 = test_utils.mask(
          x2_3, mask_constant=-1, mask_axis=(1, 3,), key=m3_key, p=0.5)
      x2_4 = test_utils.mask(
          x2_4, mask_constant=-1, mask_axis=(2,), key=m4_key, p=0.5)
    # Inputs mirror the nested `parallel` structure below.
    x1 = (((x1_1, x1_2), x1_3), x1_4)
    x2 = (((x2_1, x2_2), x2_3), x2_4) if not same_inputs else None
    N_in = 2 ** 7
    # We only include dropout on non-TPU backends, because it takes large N to
    # converge on TPU.
    dropout_or_id = stax.Dropout(0.9) if platform != 'tpu' else stax.Identity()
    init_fn, apply_fn, kernel_fn = stax.parallel(
        stax.parallel(
            stax.parallel(stax.Dense(N_in),
                          stax.serial(stax.Conv(N_in + 1, (2, 2)),
                                      stax.Flatten())),
            stax.serial(stax.Conv(N_in + 2, (2, 2)),
                        dropout_or_id,
                        stax.GlobalAvgPool())),
        stax.Conv(N_in + 3, (2,)))
    kernel_fn_empirical = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=mc_key,
        n_samples=N_SAMPLES,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=(((((0, 0), 0), 0), (((0, 0), 0), 0), {})
                   if platform == 'tpu' else None)
    )
    test_utils.assert_close_matrices(
        self,
        kernel_fn(x1, x2, get=kernel_type, mask_constant=-1),
        kernel_fn_empirical(x1, x2, get=kernel_type, mask_constant=-1),
        rtol)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 30,440 | 29.38024 | 80 | py |
neural-tangents | neural-tangents-main/tests/stax/combinators_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/stax/combinators.py`."""
import random as prandom
from absl.testing import absltest
from jax import random
from jax.config import config
import jax.numpy as np
from neural_tangents import stax
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
test_utils.update_test_tolerance()
prandom.seed(1)
class RepeatTest(test_utils.NeuralTangentsTestCase):
  """Checks `stax.repeat(layer, n)` against `stax.serial(*([layer] * n))`."""

  def _test_repeat(self, x1, x2, layer, n, rng_params, **kwargs):
    """Asserts init/apply/kernel of `repeat` match the unrolled `serial`.

    `kwargs` values are pairs — one entry per input — passed through to the
    kernel functions; the first element of each pair is used for `apply_fn`.
    """
    init_fn, apply_fn, kernel_fn = (stax.Identity() if n == 0 else
                                    stax.serial(*([layer] * n)))
    init_fn_repeat, apply_fn_repeat, kernel_fn_repeat = stax.repeat(layer, n)
    out_shape, params = init_fn(rng_params, x1.shape)
    out_shape_repeat, params_repeat = init_fn_repeat(rng_params, x1.shape)
    self.assertEqual(out_shape, out_shape_repeat)
    kwargs1 = {k: kwargs[k][0] for k in kwargs}
    out = apply_fn(params, x1, **kwargs1)
    out_repeat = apply_fn_repeat(params_repeat, x1, **kwargs1)
    self.assertAllClose(out, out_repeat)
    for get in [None, 'ntk', 'nngp', 'cov1', ('nngp', 'cov1'), ('cov1', 'ntk')]:
      with self.subTest(get=get):
        k = kernel_fn(x1, x2, get, **kwargs)
        k_repeat = kernel_fn_repeat(x1, x2, get, **kwargs)
        self.assertAllClose(k, k_repeat)

  @test_utils.product(
      same_inputs=[
          False,
          True
      ],
      n=[
          0,
          1,
          2,
          3,
      ],
      layer=[
          stax.Identity(),
          stax.Dense(3),
          stax.serial(stax.Identity()),
          stax.serial(stax.Dense(3)),
          stax.GlobalAvgPool(),
          stax.serial(stax.Dense(3), stax.Relu()),
          stax.serial(stax.Dense(3), stax.Relu(), stax.Dense(3))
      ]
  )
  def test_repeat(
      self,
      same_inputs,
      n,
      layer
  ):
    """Fully-connected / pooling layers repeated `n` times."""
    rng_input, rng_params = random.split(random.PRNGKey(1), 2)
    x1 = np.cos(random.normal(rng_input, (2, 3)))
    x2 = None if same_inputs else random.normal(rng_input, (4, 3))
    self._test_repeat(x1, x2, layer, n, rng_params)

  @test_utils.product(
      same_inputs=[
          False,
          True
      ],
      n=[
          0,
          1,
          2,
          3,
      ],
      layer=[
          stax.serial(stax.Conv(3, (2, 2), padding='SAME'),
                      stax.Relu(),
                      stax.Conv(3, (2, 2), padding='SAME'),
                      stax.Gelu()
                      ),
      ]
  )
  def test_repeat_conv(
      self,
      same_inputs,
      n,
      layer
  ):
    """Convolutional blocks repeated `n` times on NHWC inputs."""
    rng_input, rng_params = random.split(random.PRNGKey(1), 2)
    x1 = np.cos(random.normal(rng_input, (2, 4, 4, 3)))
    x2 = None if same_inputs else random.normal(rng_input, (4, 4, 4, 3))
    self._test_repeat(x1, x2, layer, n, rng_params)

  @test_utils.product(
      same_inputs=[
          False,
          True
      ],
      n=[
          0,
          1,
          2,
          3,
      ],
      layer=[
          stax.Aggregate(),
          stax.serial(stax.Dense(3), stax.Aggregate(), stax.Abs()),
          stax.serial(stax.Conv(3, (2, 2), padding='SAME'),
                      stax.Aggregate(),
                      stax.Abs(),
                      stax.Conv(3, (1, 2), padding='SAME'),
                      )
      ]
  )
  def test_repeat_agg(
      self,
      same_inputs,
      n,
      layer
  ):
    """`Aggregate` layers repeated `n` times, with and without a pattern."""
    rng_input, rng_params, rng_p1, rng_p2 = random.split(random.PRNGKey(1), 4)
    x1 = np.cos(random.normal(rng_input, (2, 4, 3, 3)))
    x2 = None if same_inputs else random.normal(rng_input, (4, 4, 3, 3))
    # Aggregation patterns pair each spatial location with every other one.
    p1 = random.normal(rng_p1, x1.shape[:-1] + x1.shape[1:-1])
    p2 = p1 if x2 is None else random.normal(rng_p2,
                                             x2.shape[:-1] + x2.shape[1:-1])
    self._test_repeat(x1, x2, layer, n, rng_params, pattern=(p1, p2))
    self._test_repeat(x1, x2, layer, n, rng_params, pattern=(None, None))
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 4,604 | 26.909091 | 80 | py |
neural-tangents | neural-tangents-main/tests/stax/branching_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/stax/branching.py`."""
import random as prandom
from absl.testing import absltest
from jax import default_backend
from jax import random
from jax.config import config
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
test_utils.update_test_tolerance()
prandom.seed(1)
class FanInTest(test_utils.NeuralTangentsTestCase):
  """Tests `FanOut` + `FanInSum` / `FanInProd` / `FanInConcat` topologies."""

  @classmethod
  def _get_phi(cls, i):
    # Cycle deterministically through three nonlinearities by branch index.
    return {
        0: stax.Relu(),
        1: stax.Erf(),
        2: stax.Abs()
    }[i % 3]

  @test_utils.product(
      same_inputs=[False],
      axis=[0, 1],
      n_branches=[3],
      get=['ntk'],
      branch_in=['dense_before_branch_in', 'dense_after_branch_in'],
      fan_in_mode=['FanInSum', 'FanInConcat', 'FanInProd']
  )
  def test_fan_in_fc(
      self,
      same_inputs,
      axis,
      n_branches,
      get,
      branch_in,
      fan_in_mode
  ):
    """Fully-connected fan-in: analytic kernel vs Monte Carlo estimate."""
    if fan_in_mode in ['FanInSum', 'FanInProd']:
      if axis != 0:
        raise absltest.SkipTest('`FanInSum` and `FanInProd` are skipped when '
                                'axis != 0.')
      axis = None
    if (fan_in_mode == 'FanInSum' or
        axis == 0) and branch_in == 'dense_after_branch_in':
      raise absltest.SkipTest('`FanInSum` and `FanInConcat(0)` '
                              'require `is_gaussian`.')
    if ((axis == 1 or fan_in_mode == 'FanInProd') and
        branch_in == 'dense_before_branch_in'):
      raise absltest.SkipTest(
          '`FanInConcat` or `FanInProd` on feature axis requires a dense layer '
          'after concatenation or Hadamard product.')
    if fan_in_mode == 'FanInSum':
      fan_in_layer = stax.FanInSum()
    elif fan_in_mode == 'FanInProd':
      fan_in_layer = stax.FanInProd()
    else:
      fan_in_layer = stax.FanInConcat(axis)
    if n_branches != 2:
      test_utils.skip_test(self)
    key = random.PRNGKey(1)
    X0_1 = np.cos(random.normal(key, (4, 3)))
    X0_2 = None if same_inputs else random.normal(key, (8, 3))
    width = 1024
    n_samples = 256 * 2
    if default_backend() == 'tpu':
      tol = 0.07
    else:
      tol = 0.02
    dense = stax.Dense(width, 1.25, 0.1)
    input_layers = [dense,
                    stax.FanOut(n_branches)]
    branches = []
    for b in range(n_branches):
      branch_layers = [FanInTest._get_phi(b)]
      for i in range(b):
        # Widths vary per branch when concatenating along the feature axis.
        multiplier = 1 if axis not in (1, -1) else (1 + 0.25 * i)
        branch_layers += [
            stax.Dense(int(width * multiplier), 1. + 2 * i, 0.5 + i),
            FanInTest._get_phi(i)]
      if branch_in == 'dense_before_branch_in':
        branch_layers += [dense]
      branches += [stax.serial(*branch_layers)]
    output_layers = [
        fan_in_layer,
        stax.Relu()
    ]
    if branch_in == 'dense_after_branch_in':
      output_layers.insert(1, dense)
    nn = stax.serial(*(input_layers + [stax.parallel(*branches)] +
                       output_layers))
    if get == 'nngp':
      init_fn, apply_fn, kernel_fn = nn
    elif get == 'ntk':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(1, 1.25, 0.5))
    else:
      raise ValueError(get)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, key, n_samples,
        device_count=0 if axis in (0, -2) else -1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=None if axis in (0, -2) else 0,
    )
    exact = kernel_fn(X0_1, X0_2, get=get)
    empirical = kernel_fn_mc(X0_1, X0_2, get=get)
    test_utils.assert_close_matrices(self, empirical, exact, tol)

  @test_utils.product(
      same_inputs=[False],
      axis=[0, 1, 2, 3],
      n_branches=[2],
      get=['ntk'],
      branch_in=['dense_before_branch_in', 'dense_after_branch_in'],
      readout=['pool', 'flatten'],
      fan_in_mode=['FanInSum', 'FanInConcat', 'FanInProd']
  )
  def test_fan_in_conv(
      self,
      same_inputs,
      axis,
      n_branches,
      get,
      branch_in,
      readout,
      fan_in_mode
  ):
    """Convolutional fan-in: analytic kernel vs Monte Carlo estimate."""
    test_utils.skip_test(self)
    if fan_in_mode in ['FanInSum', 'FanInProd']:
      if axis != 0:
        raise absltest.SkipTest('`FanInSum` and `FanInProd()` are skipped when '
                                'axis != 0.')
      axis = None
    if (fan_in_mode == 'FanInSum' or
        axis in [0, 1, 2]) and branch_in == 'dense_after_branch_in':
      raise absltest.SkipTest('`FanInSum` and `FanInConcat(0/1/2)` '
                              'require `is_gaussian`.')
    if ((axis == 3 or fan_in_mode == 'FanInProd') and
        branch_in == 'dense_before_branch_in'):
      raise absltest.SkipTest('`FanInConcat` or `FanInProd` on feature axis '
                              'requires a dense layer after concatenation '
                              'or Hadamard product.')
    if fan_in_mode == 'FanInSum':
      fan_in_layer = stax.FanInSum()
    elif fan_in_mode == 'FanInProd':
      fan_in_layer = stax.FanInProd()
    else:
      fan_in_layer = stax.FanInConcat(axis)
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (2, 5, 6, 3))
    X0_2 = None if same_inputs else random.normal(key, (3, 5, 6, 3))
    if default_backend() == 'tpu':
      width = 2048
      n_samples = 1024
      tol = 0.02
    else:
      width = 1024
      n_samples = 512
      tol = 0.015
    conv = stax.Conv(out_chan=width,
                     filter_shape=(3, 3),
                     padding='SAME',
                     W_std=1.25,
                     b_std=0.1)
    input_layers = [conv,
                    stax.FanOut(n_branches)]
    branches = []
    for b in range(n_branches):
      branch_layers = [FanInTest._get_phi(b)]
      for i in range(b):
        # Widths vary per branch when concatenating along the channel axis.
        multiplier = 1 if axis not in (3, -1) else (1 + 0.25 * i)
        branch_layers += [
            stax.Conv(
                out_chan=int(width * multiplier),
                filter_shape=(i + 1, 4 - i),
                padding='SAME',
                W_std=1.25 + i,
                b_std=0.1 + i),
            FanInTest._get_phi(i)]
      if branch_in == 'dense_before_branch_in':
        branch_layers += [conv]
      branches += [stax.serial(*branch_layers)]
    output_layers = [
        fan_in_layer,
        stax.Relu(),
        stax.GlobalAvgPool() if readout == 'pool' else stax.Flatten()
    ]
    if branch_in == 'dense_after_branch_in':
      output_layers.insert(1, conv)
    nn = stax.serial(*(input_layers + [stax.parallel(*branches)] +
                       output_layers))
    init_fn, apply_fn, kernel_fn = stax.serial(
        nn, stax.Dense(1 if get == 'ntk' else width, 1.25, 0.5))
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn,
        apply_fn,
        key,
        n_samples,
        device_count=0 if axis in (0, -4) else -1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=None if axis in (0, -4) else 0,
    )
    exact = kernel_fn(X0_1, X0_2, get=get)
    empirical = kernel_fn_mc(X0_1, X0_2, get=get)
    test_utils.assert_close_matrices(self, empirical, exact, tol)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 7,845 | 28.946565 | 80 | py |
neural-tangents | neural-tangents-main/tests/stax/linear_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/stax/linear.py."""
import itertools
import random as prandom
import string
import time
from absl.testing import absltest
from jax import lax
from jax import jit, vjp
from jax.config import config
from jax import default_backend
import jax.numpy as np
from jax import random
import more_itertools
import neural_tangents as nt
from neural_tangents import stax
from tests import test_utils
from neural_tangents._src.utils import utils
import numpy as onp
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
test_utils.update_test_tolerance()
prandom.seed(1)
@test_utils.product(
    same_inputs=[True, False]
)
class FlattenTest(test_utils.NeuralTangentsTestCase):
  """Checks `Flatten` placement (bottom / middle / top) in a network."""

  def test_flatten(self, same_inputs):
    """Compares flattened-input FC kernels against `Flatten` at every depth."""
    key = random.PRNGKey(1)
    X0_1 = random.normal(key, (4, 4, 3, 2))
    X0_2 = None if same_inputs else random.normal(key, (2, 4, 3, 2))
    X0_1_flat = np.reshape(X0_1, (X0_1.shape[0], -1))
    X0_2_flat = None if X0_2 is None else np.reshape(X0_2, (X0_2.shape[0], -1))
    dense = stax.Dense(512, 1.7, 0.1)
    # Reference FC network applied to pre-flattened inputs.
    init_fc, apply_fc, kernel_fc = stax.serial(dense,
                                               stax.Erf(),
                                               dense)
    # `Flatten` after all dense layers.
    init_top, apply_top, kernel_top = stax.serial(dense,
                                                  stax.Erf(),
                                                  dense,
                                                  stax.Flatten())
    # `Flatten` between the dense layers.
    init_mid, apply_mid, kernel_mid = stax.serial(dense,
                                                  stax.Erf(),
                                                  stax.Flatten(),
                                                  dense)
    # `Flatten` before any dense layer — must equal the reference exactly.
    init_bot, apply_bot, kernel_bot = stax.serial(stax.Flatten(),
                                                  dense,
                                                  stax.Erf(),
                                                  dense)
    kernel_fc = jit(kernel_fc)
    kernel_top = jit(kernel_top)
    kernel_mid = jit(kernel_mid)
    kernel_bot = jit(kernel_bot)
    n = 100
    kernel_fc_mc = nt.monte_carlo_kernel_fn(
        init_fc, apply_fc, key, n, vmap_axes=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    kernel_bot_mc = nt.monte_carlo_kernel_fn(
        init_bot, apply_bot, key, n, vmap_axes=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    kernel_mid_mc = nt.monte_carlo_kernel_fn(
        init_mid, apply_mid, key, n, vmap_axes=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    kernel_top_mc = nt.monte_carlo_kernel_fn(
        init_top, apply_top, key, n, vmap_axes=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION
    )
    K = kernel_fc(X0_1_flat, X0_2_flat)
    K_bot = kernel_bot(X0_1, X0_2)
    K_bot_flat = kernel_bot(X0_1_flat, X0_2_flat)
    self.assertAllClose(K_bot, K)
    self.assertAllClose(K_bot_flat, K)

    def assert_close(a, b):
      # Monte Carlo estimates get looser tolerances than analytic kernels.
      self.assertAllClose(a, b, atol=0.05, rtol=0.02)

    K_fc_mc = kernel_fc_mc(X0_1_flat, X0_2_flat, get='nngp')
    K_bot_mc = kernel_bot_mc(X0_1, X0_2, get='nngp')
    K_bot_flat_mc = kernel_bot_mc(X0_1_flat, X0_2_flat, get='nngp')
    assert_close(K_fc_mc, K.nngp)
    assert_close(K_bot_mc, K_bot.nngp)
    assert_close(K_bot_flat_mc, K_bot_flat.nngp)
    K_mid = kernel_mid(X0_1, X0_2)
    K_mid_flat = kernel_mid(X0_1_flat, X0_2_flat)
    K_mid_mc = kernel_mid_mc(X0_1, X0_2, get='nngp')
    K_mid_flat_mc = kernel_mid_mc(X0_1_flat, X0_2_flat, get='nngp')
    assert_close(K_mid_mc, K_mid.nngp)
    assert_close(K_mid_flat, K)
    assert_close(K_mid_flat_mc, K_mid_flat.nngp)
    # Align metadata so top-flatten kernels compare equal to mid-flatten ones.
    K_top = kernel_top(X0_1, X0_2).replace(is_gaussian=True,
                                           shape1=K_mid.shape1,
                                           shape2=K_mid.shape2)
    K_top_flat = kernel_top(X0_1_flat, X0_2_flat).replace(is_gaussian=True)
    K_top_mc = kernel_top_mc(X0_1, X0_2, get='nngp')
    K_top_flat_mc = kernel_top_mc(X0_1_flat, X0_2_flat, get='nngp')
    assert_close(K_top_flat, K)
    assert_close(K_top_mc, K_top.nngp)
    assert_close(K_top_flat_mc, K_top_flat.nngp)
    assert_close(K_top, K_mid)
class ConvNDTest(test_utils.NeuralTangentsTestCase):
  """Tests N-dimensional convolutions with attention/layernorm/dropout."""

  @test_utils.product(
      same_inputs=[False],
      n=[0, 1, 2],
      get=['ntk'],
      proj=['flatten', 'pool'],
      use_attn=[True],
      channels_first=[True, False],
      use_dropout=[True],
      use_layernorm=[True],
  )
  def test_conv_nd(
      self,
      same_inputs,
      n,
      get,
      proj,
      use_attn,
      channels_first,
      use_dropout,
      use_layernorm
  ):
    """Analytic kernel of an `n`-D conv network vs Monte Carlo estimate."""
    platform = default_backend()
    if platform == 'cpu':
      test_utils.skip_test(self)
    elif platform == 'gpu' and n not in (0, 1, 2, 3):
      raise absltest.SkipTest('>=4D CNN does not work on GPU.')
    elif platform == 'tpu' and use_dropout and same_inputs:
      raise absltest.SkipTest('Batched empirical kernel with dropout not '
                              'supported.')
    width = 1024
    n_samples = 512
    tol = 0.03 if platform == 'tpu' else 0.015
    key = random.PRNGKey(1)
    n_max = 5
    # For n <= n_max, `(1,) * (n - n_max)` is the empty tuple — the padding
    # only kicks in for hypothetical n > n_max.
    spatial_shape = (2, 3, 5, 4, 3)[:n] + (1,) * (n - n_max)
    filter_shape = (1, 2, 3, 1, 1)[:n] + (1,) * (n - n_max)
    strides = (1, 1, 2, 1, 2)[:n] + (1,) * (n - n_max)
    # Spatial spec letters must avoid the reserved N/C/I/O dimension names.
    spatial_spec = ''.join(c for c in string.ascii_uppercase
                           if c not in ('N', 'C', 'I', 'O'))[:n]
    filter_spec = spatial_spec + 'IO'
    if channels_first:
      channel_axis = 1
      dimension_numbers = ('NC' + spatial_spec, filter_spec,
                           'NC' + spatial_spec)
      X0_1 = random.normal(key, (2, 3) + spatial_shape)
      X0_2 = None if same_inputs else random.normal(key, (4, 3) + spatial_shape)
    else:
      channel_axis = -1
      dimension_numbers = ('N' + spatial_spec + 'C', filter_spec,
                           'N' + spatial_spec + 'C')
      X0_1 = random.normal(key, (2,) + spatial_shape + (3,))
      X0_2 = None if same_inputs else random.normal(key,
                                                    (4,) + spatial_shape + (3,))
    # Layer norm over channels, plus the first spatial axis when present.
    layernorm_axes = (dimension_numbers[2].index('C'),)
    if 'H' in dimension_numbers[2]:
      layernorm_axes += (dimension_numbers[2].index('H'),)
    if proj == 'pool':
      proj = stax.GlobalAvgPool(channel_axis=channel_axis)
    elif proj == 'flatten':
      proj = stax.Flatten()
    else:
      raise ValueError(proj)
    if use_attn:
      n_heads = int(np.sqrt(width))
      n_chan_val = int(np.round(float(width) / n_heads))
      proj = stax.serial(stax.GlobalSelfAttention(
          n_chan_out=width,
          n_chan_key=width,
          n_chan_val=n_chan_val,
          n_heads=n_heads,
          linear_scaling=True,
          W_key_std=2.,
          W_value_std=1.,
          W_query_std=1.,
          W_out_std=1.0,
          b_std=0.1,
          channel_axis=channel_axis), proj)
    nn = stax.serial(
        stax.Conv(width, filter_shape, None, 'SAME',
                  dimension_numbers=dimension_numbers),
        (stax.LayerNorm(layernorm_axes,
                        channel_axis=channel_axis)
         if use_layernorm else stax.Identity()),
        stax.Relu(),
        (stax.Dropout(0.8) if use_dropout else stax.Identity()),
        stax.Conv(width, filter_shape, strides, 'CIRCULAR',
                  dimension_numbers=dimension_numbers),
        stax.Abs(),
        proj
    )
    if get == 'nngp':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(width, 2., 0.5))
    elif get == 'ntk':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(1, 2., 0.5))
    else:
      raise ValueError(get)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, key, n_samples,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    exact = kernel_fn(X0_1, X0_2, get=get)
    empirical = kernel_fn_mc(X0_1, X0_2, get=get)
    test_utils.assert_close_matrices(self, empirical, exact, tol)
class AttentionTest(test_utils.NeuralTangentsTestCase):
  """Tests `GlobalSelfAttention` kernels with masking / position embeddings."""

  @test_utils.parameters(
      dict(
          same_inputs=same_inputs,
          get=get,
          n=n,
          linear_scaling=linear_scaling,
          mask_constant=mask_constant,
          p=p,
          mask_axis=mask_axis,
          pos_emb_type=pos_emb_type,
          n_chan_pos_emb=n_chan_pos_emb,
          pos_emb_decay_fn=pos_emb_decay_fn,
          val_pos_emb=val_pos_emb,
          W_pos_emb_std=W_pos_emb_std
      )
      for same_inputs in [
          False
      ]
      for get in [
          'ntk'
      ]
      for n in [
          2,
      ]
      for linear_scaling in [
          True,
          False
      ]
      for mask_constant in [
          10.
      ]
      for p in [0.5]
      for mask_axis in [(-1,)]
      for pos_emb_type in [
          'CONCAT',
          'SUM',
          'NONE'
      ]
      for n_chan_pos_emb in (
          [None] if pos_emb_type != 'CONCAT'
          else [None, 512]
      )
      for pos_emb_decay_fn in [
          None,
          'linear'
      ]
      for val_pos_emb in ([
          True,
          False
      ] if pos_emb_type != 'NONE' else [True])
      for W_pos_emb_std in ([
          2,
      ] if pos_emb_type != 'NONE' else [0.])
  )
  def test_attention(
      self,
      same_inputs,
      get,
      n,
      linear_scaling,
      mask_constant,
      p,
      mask_axis,
      pos_emb_type,
      n_chan_pos_emb,
      pos_emb_decay_fn,
      val_pos_emb,
      W_pos_emb_std
  ):
    """Analytic attention kernel vs Monte Carlo on masked inputs."""
    test_utils.skip_test(self)
    width = 1024
    n_samples = 1024
    tol = 0.05
    key = random.PRNGKey(1)
    n_chan_in = 2
    spatial_shape = (2, 3, 4, 3, 2, 1)[:n]
    # Normalize possibly-negative mask axes to positive indices.
    mask_axis = [i % (n + 2) for i in mask_axis]

    def get_x0(batch_size):
      # Random inputs with entries masked out at `mask_constant`.
      x0 = random.normal(key, (batch_size,) + spatial_shape + (n_chan_in,))
      x0 = test_utils.mask(x0, mask_constant, mask_axis, key, p)
      return x0

    X0_1 = get_x0(2)
    X0_2 = None if same_inputs else get_x0(4)
    # Named position-embedding decay functions used by the parameterization.
    pos_emb_fns = {
        None: None,
        'one_hot': lambda x: x == 0,
        'linear': lambda x: 1 / (1 + 4 * x)
    }

    def get_attn():
      return stax.GlobalSelfAttention(
          linear_scaling=linear_scaling,
          n_chan_out=width,
          n_chan_key=width,
          n_chan_val=int(np.round(float(width) / int(np.sqrt(width)))),
          n_heads=int(np.sqrt(width)),
          n_chan_pos_emb=n_chan_pos_emb,
          attention_mechanism='SOFTMAX' if linear_scaling else 'IDENTITY',
          pos_emb_type=pos_emb_type,
          W_pos_emb_std=W_pos_emb_std,
          pos_emb_decay_fn=pos_emb_fns[pos_emb_decay_fn],
          val_pos_emb=val_pos_emb,
          W_key_std=0.9,
          W_out_std=0.8,
          W_query_std=0.7,
          W_value_std=1.2,
          b_std=0.5
      )

    nn = stax.serial(
        stax.Conv(width, (1,) * n, padding='SAME'),
        get_attn(),
        stax.Relu(),
        stax.GlobalAvgPool()
    )
    if get == 'nngp':
      init_fn, apply_fn, kernel_fn = nn
    elif get == 'ntk':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(1, 1., 0.))
    else:
      raise ValueError(get)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=n_samples,
        device_count=-1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    kernel_fn = jit(kernel_fn, static_argnames='get')
    exact = kernel_fn(X0_1, X0_2, get, mask_constant=mask_constant)
    empirical = kernel_fn_mc(X0_1, X0_2, get=get, mask_constant=mask_constant)
    test_utils.assert_close_matrices(self, empirical, exact, tol, 2.)
class AggregateTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.Aggregate` (graph aggregation) layers.

  Checks agreement between: (a) sparse and dense aggregation
  implementations (parameters, forward pass, and analytic kernels), and
  (b) the analytic kernel vs. a Monte Carlo empirical estimate.
  """

  @test_utils.parameters(
      dict(
          get=get,
          readout=readout,
          same_input=same_input,
          activation=activation,
          mask_constant=mask_constant,
          shape=shape,
          batch_axis=batch_axis,
          channel_axis=channel_axis,
          agg_axes=agg_axes,
          do_batch=do_batch,
          implementation=implementation,
          to_dense=to_dense
      )
      for get in [
          'ntk',
      ]
      for same_input in [
          False,
          True
      ]
      for act_name, activation in [
          ('Relu', stax.Relu()),
      ]
      for mask_constant in [
          10.
      ]
      for shape in [
          (4,),
          (3, 2),
      ]
      for batch_axis in range(len(shape) + 2)
      for channel_axis in
      [
          c for c in range(len(shape) + 2)
          if c != batch_axis
      ]
      # `None` means aggregate over all non-batch, non-channel axes;
      # otherwise iterate over every subset of those axes.
      for agg_axes in [None] +
      list(more_itertools.powerset(
          [p for p in range(len(shape) + 2)
           if p not in (batch_axis, channel_axis)]))
      # Batching of the analytic kernel is only exercised when the batch
      # axis is leading.
      for do_batch in ([
          True
      ] if batch_axis == 0 else [False])
      for implementation in ['DENSE', 'SPARSE']
      for to_dense in [
          'identity',
          'sparse_to_dense',
      ]
      for name, readout in [
          ('Pooling',
           stax.GlobalAvgPool(
               batch_axis=batch_axis,
               channel_axis=channel_axis)),
      ]
  )
  def test_aggregate(
      self,
      get,
      readout,
      same_input,
      activation,
      mask_constant,
      shape,
      batch_axis,
      channel_axis,
      agg_axes,
      do_batch,
      implementation,
      to_dense
  ):
    if len(shape) > 1:
      test_utils.skip_test(self)
    if implementation == 'SPARSE' and to_dense != 'identity':
      raise absltest.SkipTest('`implementation="SPARSE"` ignores '
                              '`to_dense` argument.')
    if get == 'cov2' and same_input:
      raise absltest.SkipTest('`get="cov2"` only defined for different inputs.')
    if get in ('cov1', 'cov2') and do_batch:
      raise absltest.SkipTest('Batching of empirical kernel does not work for '
                              '`diagonal_axes != ()`.')
    batch1, batch2 = 8, 4
    num_channels = 1
    output_dims = 1 if get == 'ntk' else 2**6
    key = random.PRNGKey(1)
    key, split1, split2 = random.split(key, 3)
    # Inputs are generated batch-first/channel-last, then moved to the
    # parameterized `batch_axis` / `channel_axis` positions.
    x1 = random.normal(split1, (batch1,) + shape + (num_channels,))
    x1 = np.moveaxis(x1, (0, -1), (batch_axis, channel_axis))
    if same_input:
      x2 = None
    else:
      x2 = random.normal(split2, (batch2,) + shape + (num_channels,))
      x2 = np.moveaxis(x2, (0, -1), (batch_axis, channel_axis))
    if mask_constant is not None:
      # Randomly mask a fraction of positions (broadcast over channels).
      key, split1, split2 = random.split(key, 3)
      shape1 = list(x1.shape)
      shape1[channel_axis] = 1
      mask1 = random.bernoulli(split1, p=0.3, shape=shape1)
      x1 = np.where(mask1, mask_constant, x1)
      if x2 is not None:
        shape2 = list(x2.shape)
        shape2[channel_axis] = 1
        mask2 = random.bernoulli(split2, p=0.2, shape=shape2)
        x2 = np.where(mask2, mask_constant, x2)
    key, split1, split2 = random.split(key, 3)
    agg_shape = shape if agg_axes is None else tuple(x1.shape[a]
                                                     for a in agg_axes)
    agg_ndim = len(agg_shape)

    def sparse_to_dense(pattern):
      # Converts a `(batch, n_edges, ndim, 2)` sparse edge list into a dense
      # `(batch, *agg_shape, *agg_shape)`-style adjacency tensor by
      # scatter-adding 1 for each (sender, receiver) pair.
      if pattern is None:
        return None
      pattern = pattern.reshape(pattern.shape[:2] + (pattern.shape[2] * 2,))
      bsz, n_edges, n_dims = pattern.shape
      batch_range = np.broadcast_to(
          np.arange(bsz).reshape((bsz, 1, 1)),
          (bsz, n_edges, 1))
      pattern = np.concatenate([batch_range, pattern], 2)
      pattern = pattern.reshape((bsz * n_edges, n_dims + 1))
      out = np.zeros((bsz,) + tuple(a for a in agg_shape for _ in (0, 1)))
      out = out.at[tuple(pattern.T)].add(1.)
      out = utils.unzip_axes(out, 1)
      return out

    if to_dense == 'sparse_to_dense' or implementation == 'SPARSE':
      def get_sparse_pattern(batch_size, rng):
        # Builds a random sparse edge list; a random subset of edges is
        # disabled by pointing them at a large negative index.
        n_edges_max = onp.prod((1,) + agg_shape)**2
        n_edges = prandom.randint(0, n_edges_max)
        pattern = [np.zeros((batch_size, n_edges, 0, 2), np.int32)]
        for d in range(agg_ndim):
          rng, _ = random.split(rng)
          n_nodes = agg_shape[d]
          edges = random.randint(rng, (batch_size, n_edges, 1, 2), 0, n_nodes)
          pattern += [edges]
        pattern = np.concatenate(pattern, 2)
        mask = random.bernoulli(rng, p=0.2, shape=pattern.shape[:2])
        # Make sure the receivers are masked to large negative number.
        # The number needs to be larger than maximum size of `pattern` along
        # any of the shape axes, to make `jax.ops.at` ignore these entries in
        # `sparse_to_dense` above, otherwise they are treated as regular
        # negative indices.
        pattern = pattern.at[mask].set(-10000)
        return pattern

      pattern1 = get_sparse_pattern(batch1, split1)
      pattern2 = pattern1 if same_input else get_sparse_pattern(batch2, split2)
    else:
      # Dense aggregation: real-valued random adjacency tensors.
      pattern1 = random.uniform(split1, (batch1,) + agg_shape * 2)
      pattern2 = pattern1 if same_input else random.uniform(
          split2, (batch2,) + agg_shape * 2)

    # Build the infinite network.
    def get_nn(to_dense, implementation):
      return stax.serial(
          stax.Dense(2**6, batch_axis=batch_axis, channel_axis=channel_axis),
          activation,
          stax.Aggregate(aggregate_axis=agg_axes,
                         batch_axis=batch_axis,
                         channel_axis=channel_axis,
                         to_dense=dict(
                             identity=lambda p: p,
                             sparse_to_dense=sparse_to_dense
                         )[to_dense],
                         implementation=implementation
                         ),
          readout,
          stax.Dense(output_dims,
                     batch_axis=int(batch_axis > channel_axis),
                     channel_axis=int(batch_axis < channel_axis)))

    init_fn, apply_fn, kernel_fn = get_nn(to_dense, implementation)
    apply_fn = jit(apply_fn)
    kernel_fn = jit(kernel_fn, static_argnames='get')
    if do_batch:
      kernel_fn = nt.batch(kernel_fn, batch_size=2)
    exact = kernel_fn(x1, x2, get,
                      mask_constant=mask_constant,
                      pattern=(pattern1, pattern2))
    rtol = 0.08
    if to_dense == 'sparse_to_dense' or implementation == 'SPARSE':
      # Cross-check the sparse path against an equivalent dense network.
      init_fn_dense, apply_fn_dense, kernel_fn_dense = get_nn('identity',
                                                              'DENSE')
      apply_fn_dense = jit(apply_fn_dense)
      kernel_fn_dense = jit(kernel_fn_dense, static_argnames='get')
      pattern1_dense = sparse_to_dense(pattern1)
      pattern2_dense = sparse_to_dense(pattern2)
      # Test parameters agreement
      key, _ = random.split(key, 2)
      _, params_sparse = init_fn(key, x1.shape)
      _, params_dense = init_fn_dense(key, x1.shape)
      self.assertAllClose(params_dense, params_sparse)
      # Test forward-pass agreement
      fx1_dense = apply_fn_dense(params_dense, x1, pattern=pattern1_dense)
      fx1_sparse = apply_fn(params_sparse, x1, pattern=pattern1)
      test_utils.assert_close_matrices(self, fx1_dense, fx1_sparse, rtol)
      if not same_input:
        fx2_dense = apply_fn_dense(params_dense, x2, pattern=pattern2_dense)
        fx2_sparse = apply_fn(params_sparse, x2, pattern=pattern2)
        test_utils.assert_close_matrices(self, fx2_dense, fx2_sparse, rtol)
      # Test agreement with analytic dense kernel
      exact_dense = kernel_fn_dense(x1, x2, get,
                                    mask_constant=mask_constant,
                                    pattern=(pattern1_dense, pattern2_dense))
      self.assertAllClose(exact_dense, exact)
    # Test agreement with empirical kernel
    kernel_mc_fn = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=random.PRNGKey(10),
        n_samples=2**6,
        batch_size=2 if (default_backend() == 'tpu' and batch_axis == 0) else 0,
        device_count=-1 if batch_axis == 0 else 0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        trace_axes=(int(batch_axis < channel_axis),)
    )
    if get in ('nngp', 'ntk'):
      empirical = kernel_mc_fn(x1, x2, get,
                               mask_constant=mask_constant,
                               pattern=(pattern1, pattern2))
    elif get in ('cov1', 'cov2'):
      if get == 'cov1':
        empirical = kernel_mc_fn(x1, None, 'nngp',
                                 mask_constant=mask_constant,
                                 pattern=(pattern1, pattern1))
      elif get == 'cov2':
        empirical = kernel_mc_fn(x2, None, 'nngp',
                                 mask_constant=mask_constant,
                                 pattern=(pattern2, pattern2))
      # `cov1`/`cov2` compare against the diagonal of the full empirical
      # kernel.
      empirical = np.moveaxis(np.diagonal(empirical), -1, 0)
    else:
      raise ValueError(get)
    test_utils.assert_close_matrices(self, exact, empirical, rtol, 0.2)
class ConvTransposeTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.ConvTranspose`.

  Checks the analytic kernel against Monte Carlo estimates, and verifies
  that circular transposed convolution equals the adjoint (VJP) of a
  circular forward convolution.
  """

  @test_utils.product(
      padding=['CIRCULAR', 'SAME', 'VALID'],
      same_inputs=[False],
      filter_shape=[2, 3, 4],
      strides=[2, 3, 4],
      size=[2, 3, 4],
      diagonal_batch=[True],
      diagonal_spatial=[True, False],
  )
  def test_conv_transpose(
      self,
      same_inputs,
      padding,
      filter_shape,
      strides,
      size,
      diagonal_batch,
      diagonal_spatial
  ):
    if size > 2:
      test_utils.skip_test(self)
    width = 512
    tol = 0.01
    n_samples = 512
    # Promote scalar parameters to 1D tuples (single spatial dimension).
    filter_shape = (filter_shape,)
    strides = (strides,)
    init_fn, apply_fn, kernel_fn = stax.ConvTranspose(width, filter_shape,
                                                      strides, padding,
                                                      b_std=0.1)
    key = random.PRNGKey(1)
    shape = (size, 1)
    x1 = random.normal(key, (2,) + shape)
    x2 = random.normal(key, (3,) + shape) if not same_inputs else None
    k = kernel_fn(x1, x2,
                  diagonal_batch=diagonal_batch,
                  diagonal_spatial=diagonal_spatial,
                  get='cov1' if diagonal_batch else 'nngp')
    # Diagonalize the same axes in the empirical estimate as in the
    # analytic kernel, so the two are comparable.
    diagonal_axes = ()
    if diagonal_batch:
      diagonal_axes += (0,)
    if diagonal_spatial:
      diagonal_axes += (1,)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn, apply_fn, key, n_samples, diagonal_axes=diagonal_axes,
        device_count=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    k_mc = kernel_fn_mc(x1, None if diagonal_batch else x2, 'nngp')
    test_utils.assert_close_matrices(self, k_mc, k, tol)

  @classmethod
  def _conv_transpose_circular_via_grad(
      cls,
      lhs,
      params,
      strides,
      padding,
      dimension_numbers
  ):
    """Helper method: calculates conv transpose via grad for testing.

    Adapted from `jax.tests.lax_test`.
    """
    rhs = params[0]
    # Swap input/output channel axes and flip the spatial axis so the
    # forward convolution's adjoint matches the transposed convolution.
    rhs = np.swapaxes(rhs, dimension_numbers[1].index('O'),
                      dimension_numbers[1].index('I'))
    rhs = np.flip(rhs, dimension_numbers[1].index('H'))
    assert len(lhs.shape) == len(rhs.shape)
    nspatial = len(lhs.shape) - 2
    dn = lax.conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
    in_shape = onp.take(lhs.shape, dn.lhs_spec)
    in_sdims = in_shape[2:]
    k_shape = onp.take(rhs.shape, dn.rhs_spec)
    # Output spatial size of the transposed conv is input size * stride.
    o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]
    o_shape = [in_shape[0], k_shape[1]] + o_sdims
    out_spec_inv = [x[0] for x in
                    sorted(enumerate(dn.out_spec), key=lambda x: x[1])]
    o_layout = onp.take(onp.array(o_shape), out_spec_inv)
    placeholder = np.ones(o_layout, lhs.dtype)
    _, apply_fn, _ = stax.Conv(
        out_chan=rhs.shape[dimension_numbers[1].index('I')],
        filter_shape=(rhs.shape[dimension_numbers[1].index('H')],),
        strides=strides,
        padding=padding,
        dimension_numbers=dimension_numbers,
        parameterization='standard'
    )
    conv = lambda x: apply_fn((rhs, 0.), x)
    # The VJP of the forward convolution is the transposed convolution.
    _, g = vjp(conv, placeholder)
    return g(lhs)[0]

  @classmethod
  def _conv_transpose_circular(
      cls,
      lhs,
      params,
      strides,
      padding,
      dimension_numbers
  ):
    """Helper method: calculates conv transpose."""
    _, apply_fn, _ = stax.ConvTranspose(
        out_chan=params[0].shape[dimension_numbers[1].index('O')],
        filter_shape=(params[0].shape[dimension_numbers[1].index('H')],),
        strides=strides,
        padding=padding,
        dimension_numbers=dimension_numbers,
        parameterization='standard'
    )
    return apply_fn((params[0], 0.), lhs)

  @test_utils.product(
      filter_shape=[1, 2, 3, 4],
      strides=[1, 2, 3, 4],
      size=[1, 2, 3, 4]
  )
  def test_conv_transpose_circular(self, size, filter_shape, strides):
    """Circular `ConvTranspose` must equal the adjoint of circular `Conv`."""
    if size > 2:
      test_utils.skip_test(self)
    x = random.normal(random.PRNGKey(1), (2, size, 3))
    dn = ('NHC', 'HIO', 'NHC')
    padding = 'CIRCULAR'
    filter_shape = (filter_shape,)
    strides = (strides,)
    init_fn, _, _ = stax.ConvTranspose(4, filter_shape, strides, padding)
    _, params = init_fn(random.PRNGKey(2), x.shape)
    f_conv = self._conv_transpose_circular(x, params, strides, padding, dn)
    f_adj = self._conv_transpose_circular_via_grad(x, params, strides, padding,
                                                   dn)
    self.assertAllClose(f_adj, f_conv)
class DotGeneralTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.DotGeneral` (contraction with a constant operand).

  Verifies analytic kernels against empirical estimates over a grid of
  contracting/batch dimension choices, operand sides (`is_rhs`), axis
  permutations of the constant operand, and kernel diagonalizations;
  also checks masking semantics.
  """

  @test_utils.parameters(
      dict(
          same_inputs=same_inputs,
          n=n,
          batch_dims=batch_dims,
          contracting_dims=contracting_dims,
          b_dims=b_dims,
          c_dims=c_dims,
          r_permutation=r_permutation,
          channel_axis=channel_axis,
          batch_axis=batch_axis,
          is_rhs=is_rhs,
          diagonal_spatial=diagonal_spatial,
          diagonal_batch=diagonal_batch
      )
      for same_inputs in [True, False]
      for n in [2, 3]
      for is_rhs in [False, True]
      for batch_axis in range(n)
      for channel_axis in [i for i in range(n) if i != batch_axis]
      for diagonal_spatial in [True, False]
      for diagonal_batch in [True, False]
      # `batch_dims` / `contracting_dims` are disjoint subsets of the
      # input axes excluding the channel axis; `c_dims` / `b_dims` are
      # orderings of them, and `r_permutation` permutes the constant
      # operand's axes.
      for batch_dims in more_itertools.powerset(
          i for i in range(n)
          if i != channel_axis)
      for contracting_dims in more_itertools.powerset(
          i for i in range(n)
          if i not in batch_dims + (channel_axis,))
      for c_dims in itertools.permutations(contracting_dims)
      for b_dims in itertools.permutations(batch_dims)
      for r_permutation in itertools.permutations(range(n))
  )
  def test_dot_general(
      self,
      same_inputs,
      n,
      batch_dims,
      contracting_dims,
      c_dims,
      b_dims,
      r_permutation,
      channel_axis,
      is_rhs,
      diagonal_spatial,
      diagonal_batch,
      batch_axis
  ):
    if n != 2:
      test_utils.skip_test(self)
    if default_backend() == 'tpu':
      atol = 1.
    else:
      atol = 0.1
    n_b = 2
    n_c = 1
    key1, key2, key3 = random.split(random.PRNGKey(1), 3)
    # Spatial sizes; batch / channel sizes are inserted at their axes below.
    x_shape_n_c = [2, 4, 6, 8, 10, 12, 14][:n - 2]
    x_shape = list(x_shape_n_c)
    for a in sorted((batch_axis, channel_axis)):
      x_shape.insert(a, n_b if a == batch_axis else n_c)
    mask_constant = 10.
    x1 = np.cos(random.normal(key1, x_shape))
    mask1 = random.bernoulli(key1, p=0.8, shape=x1.shape)
    x1 = np.where(mask1, mask_constant, x1)
    if same_inputs:
      x2 = None
    else:
      # `x2` may have a different batch size unless the batch axis takes
      # part in the contraction/batching (then sizes must match).
      x2_shape = (x_shape[:batch_axis] +
                  [4 if (batch_axis not in contracting_dims + batch_dims)
                   else x_shape[batch_axis]] +
                  x_shape[batch_axis + 1:])
      x2 = np.cos(random.normal(key2, x2_shape))
      mask2 = random.bernoulli(key2, p=0.4, shape=x2.shape)
      x2 = np.where(mask2, mask_constant, x2)
    # Constant operand: sizes must agree with `x_shape` on contracted and
    # batched axes; deterministic integer values for reproducibility.
    other_shape = [1, 3, 5, 7, 9, 11, 13, 15][:n]
    for i in contracting_dims + batch_dims:
      other_shape[i] = x_shape[i]
    other = random.normal(key3, other_shape)
    other = np.arange(np.size(other)).reshape(other_shape)
    other_t = np.transpose(other, r_permutation)
    # Dimension indices of `other_t` corresponding to `c_dims` / `b_dims`.
    r_c_dims = tuple(r_permutation.index(c) for c in c_dims)
    r_b_dims = tuple(r_permutation.index(b) for b in b_dims)
    if is_rhs:
      lhs, rhs = None, other_t
      dn = ((c_dims, r_c_dims), (b_dims, r_b_dims))
    else:
      lhs, rhs = other_t, None
      dn = ((r_c_dims, c_dims), (r_b_dims, b_dims))
    lhs_ndim = None if lhs is None else lhs.ndim
    init_fn, apply_fn, kernel_fn = stax.DotGeneral(lhs=lhs,
                                                   rhs=rhs,
                                                   dimension_numbers=dn,
                                                   batch_axis=batch_axis)

    def get_exact():
      return kernel_fn(x1, x2,
                       diagonal_spatial=diagonal_spatial,
                       diagonal_batch=diagonal_batch,
                       batch_axis=batch_axis,
                       channel_axis=channel_axis,
                       mask_constant=mask_constant)

    # Contracting a diagonalized axis is unsupported and must raise.
    if (([i for i in c_dims if i not in (batch_axis, channel_axis)] and
         diagonal_spatial) or
        (batch_axis in c_dims and diagonal_batch)):
      self.assertRaises(ValueError, get_exact)
    else:
      exact = get_exact()
      # Positions of the channel / batch axes in the `dot_general` output.
      out_c_axis = utils.axis_after_dot(channel_axis, c_dims, b_dims, lhs_ndim)
      out_b_axis = utils.axis_after_dot(batch_axis, c_dims, b_dims, lhs_ndim)

      def get_empirical(get):
        def get_diagonal_axes():
          # Output axes to diagonalize in the empirical kernel so it
          # matches the analytic kernel's layout.
          axes = ()
          if (get in ('cov1', 'cov2') and
              diagonal_batch and
              batch_axis not in c_dims):
            axes += (out_b_axis,)
          if diagonal_spatial:
            axes += tuple(
                utils.axis_after_dot(i, c_dims, b_dims, lhs_ndim)
                for i in range(n)
                if i not in c_dims + (batch_axis, channel_axis))
            rhs_ndim = None if rhs is None else rhs.ndim
            axes += tuple(
                utils.axis_after_dot(i, r_c_dims, r_b_dims, rhs_ndim)
                for i in range(n)
                if i not in r_c_dims and
                not (i in r_b_dims and b_dims[r_b_dims.index(i)] == batch_axis))
          return axes

        def batch_axes():
          # Source/destination axes for moving batch axes to the front of
          # the empirical kernel, matching the analytic layout.
          if batch_axis in contracting_dims:
            return (), ()
          axis = out_b_axis
          if out_c_axis < axis:
            axis -= 1
          if not diagonal_spatial:
            axis *= 2
          if get in ('cov1', 'cov2') and diagonal_batch:
            return (axis,), (0,)
          return (axis, axis + 1), (0, 1)

        kernel_fn_mc = nt.monte_carlo_kernel_fn(
            init_fn=init_fn,
            apply_fn=stax.unmask_fn(apply_fn),
            key=key1,
            n_samples=1,
            trace_axes=(out_c_axis,),
            diagonal_axes=get_diagonal_axes(),
            device_count=-1 if (get == 'nngp' and
                                batch_axis == out_b_axis == 0 and
                                0 not in c_dims + b_dims) else 0,
            implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        )
        empirical = kernel_fn_mc(x1=x2 if get == 'cov2' else x1,
                                 x2=x2 if get == 'nngp' else None,
                                 get='nngp',
                                 batch_axis=batch_axis,
                                 channel_axis=channel_axis,
                                 mask_constant=mask_constant)
        empirical = np.moveaxis(empirical, *batch_axes())
        return empirical

      for get in ('nngp', 'cov1', 'cov2'):
        if get == 'cov2' and same_inputs:
          continue
        with self.subTest(get=get):
          test_utils.assert_close_matrices(
              self, get_empirical(get), getattr(exact, get), 0.01, atol)

  @test_utils.product(
      same_inputs=[False, True],
      get=['ntk'],
      do_pool=[True, False],
      n=[3, 4],
      is_rhs=[False, True],
      dot_first=[True, False]
  )
  def test_dot_general_nn(
      self,
      same_inputs,
      get,
      n,
      is_rhs,
      do_pool,
      dot_first
  ):
    """Tests `DotGeneral` embedded inside a deeper network."""
    if n != 2:
      test_utils.skip_test(self)
    width = 2**8
    n_samples = 2**8
    tol = 0.03
    key1, key2, key3 = random.split(random.PRNGKey(1), 3)
    mask_constant = 10.
    x_shape = [6, 3, 4, 5][:n - 1] + [1]
    x1 = np.cos(random.normal(key1, x_shape))
    mask1 = random.bernoulli(key1, p=0.8, shape=x1.shape)
    x1 = np.where(mask1, mask_constant, x1)
    if same_inputs:
      x2 = None
    else:
      x2 = np.cos(random.normal(key2, x_shape))
      mask2 = random.bernoulli(key2, p=0.4, shape=x2.shape)
      x2 = np.where(mask2, mask_constant, x2)
    other = random.normal(key3, [3, 4, 6, 2])
    # Fixed contraction: input axis 1 with operand axis 0, batched over
    # input axis 0 / operand axis 2.
    c_dims, b_dims = (1,), (0,)
    o_c_dims, o_b_dims = (0,), (2,)
    if is_rhs:
      lhs, rhs = None, other
      dn = ((c_dims, o_c_dims), (b_dims, o_b_dims))
    else:
      lhs, rhs = other, None
      dn = ((o_c_dims, c_dims), (o_b_dims, b_dims))
    lhs_ndim = None if lhs is None else lhs.ndim
    # Track where the channel / batch axes land after the contraction, so
    # subsequent layers can be configured accordingly.
    out_c_axis = utils.axis_after_dot(n - 1, c_dims, b_dims, lhs_ndim)
    out_b_axis = utils.axis_after_dot(0, c_dims, b_dims, lhs_ndim)
    top_b_axis = int(out_b_axis > out_c_axis and do_pool)
    init_fn, apply_fn, kernel_fn = stax.serial(
        stax.Identity() if dot_first else stax.Conv(
            width, (3,) * (n - 2), padding='SAME'),
        stax.DotGeneral(lhs=lhs,
                        rhs=rhs,
                        dimension_numbers=dn),
        stax.Dense(width, batch_axis=out_b_axis, channel_axis=out_c_axis),
        stax.Relu(),
        (stax.GlobalAvgPool(channel_axis=out_c_axis,
                            batch_axis=out_b_axis) if do_pool else
         stax.Flatten(batch_axis=out_b_axis)),
        stax.Dense(
            width if get == 'nngp' else 1, 0.9, 0.1,
            batch_axis=top_b_axis,
            channel_axis=int(out_c_axis > out_b_axis or not do_pool))
    )
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key1,
        n_samples=n_samples,
        trace_axes=(int(out_c_axis > out_b_axis) if do_pool else 1,),
        device_count=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
    )
    empirical = kernel_fn_mc(x1, x2, get, mask_constant=mask_constant)
    exact = kernel_fn(x1, x2, get, mask_constant=mask_constant)
    test_utils.assert_close_matrices(self, empirical, exact, tol)

  def test_dot_general_mask(self):
    """Masked entries must contribute zero to the `DotGeneral` kernel."""
    x1, x2 = np.ones((4, 2, 3, 1)), np.ones((4, 2, 3, 1))
    mask_constant = 10.

    def get_k(x1, x2, m1, m2):
      # Applies boolean masks `m1`/`m2` and returns the 'nngp' kernel of a
      # `DotGeneral` contraction with an all-ones operand.
      x1, x2 = np.where(m1, mask_constant, x1), np.where(m2, mask_constant, x2)
      k_fn = stax.DotGeneral(
          rhs=np.ones(x1.shape[:-1]),
          dimension_numbers=(((1,), (1,)), ((2,), (2,))))[2]
      k = k_fn(x1, x2, 'nngp', mask_constant=mask_constant)
      return k

    # No masking: each entry is the full contraction (2 * 2 = 4).
    m1, m2 = np.zeros_like(x1, np.bool_), np.zeros_like(x2, np.bool_)
    k = get_k(x1, x2, m1, m2)
    self.assertAllClose(np.ones_like(k) * 4, k)

    # Fully masking either side zeroes the kernel.
    m1, m2 = np.ones_like(x1, np.bool_), np.zeros_like(x2, np.bool_)
    k = get_k(x1, x2, m1, m2)
    self.assertAllClose(np.zeros_like(k), k)

    m1, m2 = np.ones_like(x1, np.bool_), np.ones_like(x2, np.bool_)
    k = get_k(x1, x2, m1, m2)
    self.assertAllClose(np.zeros_like(k), k)

    # Partial batch masking: masked rows are zero, unmasked rows unaffected.
    m1 = np.concatenate([np.ones_like(x1[:2], np.bool_),
                         np.zeros_like(x1[2:], np.bool_)])
    m2 = np.zeros_like(x2, np.bool_)
    k = get_k(x1, x2, m1, m2)
    self.assertAllClose(np.zeros_like(k[:2]), k[:2])
    self.assertAllClose(np.full_like(k[2:], 4.), k[2:])
class ImageResizeTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.ImageResize`.

  Compares analytic kernels against empirical estimates for different
  resize methods/target shapes, and verifies that invalid combinations
  (resizing a diagonalized batch/spatial axis, or the channel axis)
  raise `ValueError`.
  """

  @test_utils.parameters(
      dict(
          same_inputs=same_inputs,
          n=n,
          channel_axis=channel_axis,
          batch_axis=batch_axis,
          diagonal_spatial=diagonal_spatial,
          diagonal_batch=diagonal_batch,
          method=method,
          antialias=antialias,
          precision=precision,
          shape=shape
      )
      for same_inputs in [
          True,
          False
      ]
      for n in [
          2,
          3,
          4
      ]
      for batch_axis in range(n)
      for channel_axis in [i for i in range(n) if i != batch_axis]
      for diagonal_spatial in [
          True,
          False
      ]
      for diagonal_batch in [
          True,
          False
      ]
      for method in [
          'linear',
          'nearest'
      ]
      for antialias in [
          True,
          False
      ]
      for precision in [
          lax.Precision.DEFAULT
      ]
      # `-1` in `shape` means "do not resize this axis".
      for shape in [s[:n] for s in [
          (-1, 2, 3, 4),
          (-1, 3, -1, 4),
          (10, 5, 1, 8),
          (5, -1, 2, 3)
      ]]
  )
  def test_image_resize(
      self,
      same_inputs,
      n,
      channel_axis,
      diagonal_spatial,
      diagonal_batch,
      batch_axis,
      method,
      antialias,
      precision,
      shape
  ):
    if n > 2:
      test_utils.skip_test(self)
    n_b1, n_b2 = 2, 4
    n_c = 1
    key1, key2, _ = random.split(random.PRNGKey(1), 3)
    # Never request resizing of the channel axis in this test.
    shape = shape[:channel_axis] + (-1,) + shape[channel_axis + 1:]
    x_shape_n_c = [2, 4, 6, 8, 10, 12, 14][:n - 2]
    x_shape = list(x_shape_n_c)
    for a in sorted((batch_axis, channel_axis)):
      x_shape.insert(a, n_b1 if a == batch_axis else n_c)
    mask_constant = 10.
    x1 = np.cos(random.normal(key1, x_shape))
    mask1 = random.bernoulli(key1, p=0.3, shape=x1.shape)
    x1 = np.where(mask1, mask_constant, x1)
    if same_inputs:
      x2 = None
    else:
      x2_shape = (x_shape[:batch_axis] +
                  [n_b2] +
                  x_shape[batch_axis + 1:])
      x2 = np.cos(random.normal(key2, x2_shape))
      mask2 = random.bernoulli(key2, p=0.2, shape=x2.shape)
      x2 = np.where(mask2, mask_constant, x2)
    init_fn, apply_fn, kernel_fn = stax.ImageResize(method=method,
                                                    antialias=antialias,
                                                    precision=precision,
                                                    batch_axis=batch_axis,
                                                    channel_axis=channel_axis,
                                                    shape=shape
                                                    )

    def get_exact():
      return kernel_fn(x1, x2,
                       diagonal_spatial=diagonal_spatial,
                       diagonal_batch=diagonal_batch,
                       batch_axis=batch_axis,
                       channel_axis=channel_axis,
                       mask_constant=mask_constant
                       )

    # Resizing an axis that the kernel keeps only as a diagonal must raise.
    if ((shape[batch_axis] != -1 and diagonal_batch) or
        (any(shape[i] != -1 for i in range(len(shape))
             if i not in (batch_axis, channel_axis)) and diagonal_spatial)):
      self.assertRaises(ValueError, get_exact)
    else:
      exact = get_exact()

      def get_empirical(get):
        def get_diagonal_axes():
          # Diagonalize the same axes as in the analytic kernel.
          axes = ()
          if get in ('cov1', 'cov2') and diagonal_batch:
            axes += (batch_axis,)
          if diagonal_spatial:
            axes += tuple(i for i in range(n)
                          if i not in (batch_axis, channel_axis))
          return axes

        kernel_fn_mc = nt.monte_carlo_kernel_fn(
            init_fn=init_fn,
            apply_fn=stax.unmask_fn(apply_fn),
            key=key1,
            n_samples=1,
            trace_axes=(channel_axis,),
            diagonal_axes=get_diagonal_axes(),
            device_count=-1 if (get == 'nngp' and
                                batch_axis == 0 and
                                shape[batch_axis] == -1) else 0,
            implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        )
        empirical = kernel_fn_mc(x1=x2 if get == 'cov2' else x1,
                                 x2=x2 if get == 'nngp' else None,
                                 get='nngp',
                                 batch_axis=batch_axis,
                                 channel_axis=channel_axis,
                                 mask_constant=mask_constant
                                 )

        def batch_axes():
          # Source/destination axes for moving the batch axes to the front
          # of the empirical kernel.
          axis = batch_axis
          if channel_axis < batch_axis:
            axis -= 1
          if not diagonal_spatial:
            axis *= 2
          if get in ('cov1', 'cov2') and diagonal_batch:
            return (axis,), (0,)
          return (axis, axis + 1), (0, 1)

        empirical = np.moveaxis(empirical, *batch_axes())
        return empirical

      for get in ('nngp', 'cov1', 'cov2'):
        if get == 'cov2' and same_inputs:
          continue
        with self.subTest(get=get):
          tol = 1e-2 if default_backend() == 'tpu' else 1e-5
          test_utils.assert_close_matrices(
              self, get_empirical(get), getattr(exact, get), tol)

  @test_utils.product(
      same_inputs=[False, True],
      get=['ntk'],
      do_pool=[True, False],
      n=[3],
      bottom_layer=['resize', 'conv', 'relu'],
      method=['linear', 'nearest'],
      shape=[
          (1, 2, 4),
          (2, 1, 1),
          (-1, 2, -1),
          (2, 4, -1),
          (9, -1, -1),
          (-1, -1, -1),
          (3, 4, -1),
          (1, 1, -1),
      ]
  )
  def test_image_resize_nn(
      self,
      same_inputs,
      get,
      n,
      do_pool,
      bottom_layer,
      method,
      shape
  ):
    """Tests `ImageResize` embedded inside a deeper network."""
    if n != 2:
      test_utils.skip_test(self)
    width = 2**7
    n_samples = 2**7
    tol = 0.03
    key1, key2, _ = random.split(random.PRNGKey(1), 3)
    mask_constant = 10.
    x_shape = [6, 3, 4, 5][:n - 1] + [1]
    x1 = np.cos(random.normal(key1, x_shape))
    mask1 = random.bernoulli(key1, p=0.2, shape=x1.shape)
    x1 = np.where(mask1, mask_constant, x1)
    if same_inputs:
      x2 = None
    else:
      x2 = np.cos(random.normal(key2, x_shape))
      mask2 = random.bernoulli(key2, p=0.1, shape=x2.shape)
      x2 = np.where(mask2, mask_constant, x2)
    # Layer(s) placed before the resize, selected by `bottom_layer`.
    bottom = {'conv': stax.Conv(width, (3,) * (n - 2), padding='SAME'),
              'relu': stax.serial(
                  stax.Conv(width, (3,) * (n - 2), padding='SAME'),
                  stax.Relu()),
              'resize': stax.Identity()}[bottom_layer]
    init_fn, apply_fn, kernel_fn = stax.serial(
        bottom,
        stax.ImageResize(method=method,
                         shape=shape),
        stax.Conv(width, (2,), padding='SAME'),
        stax.Relu(),
        (stax.GlobalAvgPool() if do_pool else stax.Flatten()),
        stax.Dense(width if get == 'nngp' else 1, 0.9, 0.1)
    )
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key1,
        n_samples=n_samples,
        device_count=0,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
    )
    empirical = kernel_fn_mc(x1, x2, get, mask_constant=mask_constant)

    def get_exact():
      return kernel_fn(x1, x2, get, mask_constant=mask_constant)

    if shape[-1] != -1:
      # Make sure an error is thrown if resizing a channel axis is requested.
      self.assertRaises(ValueError, get_exact)
    else:
      exact = get_exact()
      test_utils.assert_close_matrices(self, empirical, exact, tol)
class ConvLocalTest(test_utils.NeuralTangentsTestCase):
@test_utils.product(
diagonal_spatial=[True, False]
)
def test_whitened_inputs(self, diagonal_spatial):
test_utils.skip_test(self)
x = np.cos(random.normal(random.PRNGKey(1), (4 * 8 * 8, 512)))
cov = x @ x.T
whiten = np.linalg.cholesky(np.linalg.inv(cov))
x_white = whiten.T @ x
cov_white = x_white @ x_white.T
self.assertAllClose(np.eye(x.shape[0]), cov_white)
width = 256
scales = random.normal(random.PRNGKey(2), (4, 8, 8, 1))
x_white = x_white.reshape((4, 8, 8, 512)) * scales
x = x.reshape(x_white.shape) * scales
init_fn, apply_fn, kernel_fn = stax.serial(
stax.AvgPool((2, 3)),
stax.ConvLocal(width, (3, 1), padding='SAME', W_std=4.2, b_std=0.09),
stax.Relu(),
stax.Conv(width, (2, 3), padding='SAME', W_std=3.8, b_std=0.04),
stax.Relu(),
stax.ConvLocal(width, (2, 2), padding='SAME', W_std=6.4, b_std=0.1),
stax.GlobalAvgPool())
k_white = kernel_fn(x_white, None, diagonal_spatial=diagonal_spatial)
self._test_against_mc(apply_fn, init_fn, k_white.nngp, x_white, None)
k = kernel_fn(x, None, diagonal_spatial=diagonal_spatial)
if diagonal_spatial:
with self.assertRaises(AssertionError):
self._test_against_mc(apply_fn, init_fn, k.nngp, x, None)
else:
self._test_against_mc(apply_fn, init_fn, k.nngp, x, None)
@test_utils.product(
padding=['SAME', 'VALID', 'CIRCULAR'],
same_inputs=[False],
filter_shape=[2, 3],
strides=[1, 2],
size=[2, 3],
diagonal_batch=[True],
diagonal_spatial=[True, False],
get=['cov1', 'nngp', 'ntk'],
parameterization=['standard', 'ntk']
)
def test_conv_local(
self,
same_inputs,
padding,
filter_shape,
strides,
size,
diagonal_batch,
diagonal_spatial,
get,
parameterization
):
test_utils.skip_test(self)
if diagonal_batch and get != 'cov1':
raise absltest.SkipTest('Checking `diagonal_batch` only on `cov1`.')
key1, key2, key_mc = random.split(random.PRNGKey(1), 3)
shape = (size, 1)
x1 = random.normal(key1, (2,) + shape)
x2 = random.normal(key2, (3,) + shape) if not same_inputs else None
kernel_kwargs = dict(diagonal_batch=diagonal_batch,
diagonal_spatial=diagonal_spatial)
conv_kwargs = dict(out_chan=512,
filter_shape=(filter_shape,),
strides=(strides,),
padding=padding,
b_std=0.2,
W_std=1.5,
parameterization=parameterization)
init_fn, apply_fn, kernel_fn = stax.ConvLocal(**conv_kwargs)
k = kernel_fn(x1, x2, **kernel_kwargs)
# Compared to MC estimate
diagonal_axes = ()
if diagonal_batch:
diagonal_axes += (0,)
if diagonal_spatial:
diagonal_axes += (1,)
kernel_fn_mc = nt.monte_carlo_kernel_fn(
init_fn, apply_fn, key_mc, n_samples=512, diagonal_axes=diagonal_axes,
device_count=0,
implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
vmap_axes=0
)
k_mc = kernel_fn_mc(x1, None if get == 'cov1' else x2,
'nngp' if get == 'cov1' else get)
test_utils.assert_close_matrices(self, k_mc, getattr(k, get), 0.011, 1.)
# Compared diagonal entries to CNN
_, _, kernel_fn_conv = stax.Conv(**conv_kwargs)
k_conv = kernel_fn_conv(x1, x2, **kernel_kwargs)
if not diagonal_spatial:
def get_diag(k):
k = getattr(k, get)
k = np.diagonal(k, axis1=-1, axis2=-2)
return k
k_conv = get_diag(k_conv)
k = get_diag(k)
tol = 0.005 if default_backend() == 'tpu' else 0.001
self.assertAllClose(k_conv, k, atol=tol, rtol=tol)
@test_utils.product(
pool=[
stax.Identity(),
stax.AvgPool((2, 3), (2, 1), 'VALID')
],
readout=[
stax.Flatten(),
stax.GlobalAvgPool()
],
same_inputs=[False],
get=['ntk'],
parameterization=['ntk', 'standard']
)
def test_conv_local_deep(
self,
get,
pool,
same_inputs,
readout,
parameterization
):
test_utils.skip_test(self)
key1, key2, key_mc = random.split(random.PRNGKey(1), 3)
x1 = random.normal(key1, (2, 7, 8, 3))
x2 = random.normal(key2, (3, 7, 8, 3)) if not same_inputs else None
def get_nn(conv):
width = 256
return stax.serial(
conv(width, (2, 3), (2, 1), padding='CIRCULAR', W_std=1.5, b_std=0.2,
parameterization=parameterization),
pool,
stax.Erf(),
conv(width, (3, 1), (1, 2), padding='SAME'),
stax.Relu(),
conv(width, (2, 3), (2, 1), padding='VALID', W_std=1.2, b_std=0.3,
parameterization=parameterization),
readout,
stax.Dense(1 if get == 'ntk' else width)
)
init_fn, apply_fn, kernel_fn_local = get_nn(stax.ConvLocal)
k_local = kernel_fn_local(x1, x2, get)
# Test results for consistency with different diagonalizations.
for diagonal_batch in [True]:
for diagonal_spatial in [True, False]:
kwargs = dict(get=get,
diagonal_batch=diagonal_batch,
diagonal_spatial=diagonal_spatial)
with self.subTest(**kwargs):
k_local_d = kernel_fn_local(x1, x2, **kwargs)
test_utils.assert_close_matrices(self, k_local, k_local_d, 0.01)
# Test against CNN-GP diagonal if only flattening is used.
if pool[0].__name__ == 'Identity' and readout[0].__name__ == 'Flatten':
_, _, kernel_fn_conv = get_nn(stax.Conv)
k_conv = kernel_fn_conv(x1, x2, get)
self.assertAllClose(k_conv, k_local)
# Test against MC.
kernel_fn_mc = nt.monte_carlo_kernel_fn(
init_fn, apply_fn, key_mc, n_samples=512, device_count=0,
implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
vmap_axes=0
)
k_mc = kernel_fn_mc(x1, x2, get)
test_utils.assert_close_matrices(self, k_mc, k_local, 0.015, 1.)
def test_conv_local_conv(self):
test_utils.skip_test(self, platforms=('cpu', 'tpu'))
key1, key2 = random.split(random.PRNGKey(1), 2)
x1 = np.cos(random.normal(key1, (5, 32, 32, 1)))
x2 = np.sin(random.normal(key2, (5, 32, 32, 1)))
width = 128
local_conv = stax.serial(stax.ConvLocal(width, (3, 2)),
stax.AvgPool((2, 3), padding='SAME'),
stax.Relu(),
stax.ConvLocal(width, (1, 2), padding='SAME'),
stax.AvgPool((2, 1), padding='SAME'),
stax.Relu(),
stax.Conv(width, (3, 3), padding='SAME'),
stax.Relu(),
stax.Conv(width, (3, 3), padding='SAME'))
init_fn, apply_fn, kernel_fn = local_conv
# No projection layer
k = kernel_fn(x1, x2)
self.assertEqual(k.diagonal_spatial, False)
self._test_against_mc(apply_fn, init_fn, k.nngp, x1, x2, 0.03)
# Top layer flat
init_fn, apply_fn, kernel_fn = stax.serial(local_conv, stax.Flatten())
k_jit = jit(lambda x1, x2: kernel_fn(x1, x2))
k_jit(x2, x1).nngp.block_until_ready()
time_flat = time.time()
k = k_jit(x1, x2).nngp.block_until_ready()
time_flat = time.time() - time_flat
self._test_against_mc(apply_fn, init_fn, k, x1, x2, 0.03)
# Top layer pooling
init_fn, apply_fn, kernel_fn = stax.serial(local_conv, stax.GlobalAvgPool())
k_jit = jit(lambda x1, x2: kernel_fn(x1, x2))
k_jit(x2, x1).nngp.block_until_ready()
time_pool = time.time()
k = k_jit(x1, x2).nngp.block_until_ready()
time_pool = time.time() - time_pool
self.assertLess(time_flat * 4, time_pool)
self._test_against_mc(apply_fn, init_fn, k, x1, x2, 0.03)
# Top layer LCN + pooling
init_fn, apply_fn, kernel_fn = stax.serial(local_conv,
stax.ConvLocal(width, (2, 2),
padding='SAME'),
stax.GlobalAvgPool())
k_jit = jit(lambda x1, x2: kernel_fn(x1, x2))
k_jit(x2, x1).nngp.block_until_ready()
time_lcn_pool = time.time()
k = k_jit(x1, x2).nngp.block_until_ready()
time_lcn_pool = time.time() - time_lcn_pool
self.assertLess(time_lcn_pool * 5, time_pool)
self._test_against_mc(apply_fn, init_fn, k, x1, x2, 0.03)
  def test_double_pool(self):
    """Checks `ConvLocal` followed by one and by two `AvgPool`s against MC.

    Stacking a second pooling layer exercises correct propagation of the
    full spatial covariance through consecutive pooling layers.
    """
    test_utils.skip_test(self)
    key1, key2 = random.split(random.PRNGKey(1), 2)
    x1 = np.cos(random.normal(key1, (2, 4, 6, 3)))
    x2 = np.sin(random.normal(key2, (3, 4, 6, 3)))
    width = 256
    single_pool = stax.serial(stax.ConvLocal(width, (2, 3),
                                             W_std=2., b_std=0.01),
                              stax.AvgPool((3, 2)))
    # Single pooling layer.
    init_fn, apply_fn, kernel_fn = stax.serial(single_pool,
                                               stax.Flatten())
    k_single = kernel_fn(x1, x2)
    self._test_against_mc(apply_fn, init_fn, k_single.nngp, x1, x2, 0.05)
    # Second pooling layer stacked on top of the first.
    init_fn, apply_fn, kernel_fn = stax.serial(single_pool,
                                               stax.AvgPool((1, 2)),
                                               stax.Flatten())
    k_double = kernel_fn(x1, x2)
    self._test_against_mc(apply_fn, init_fn, k_double.nngp, x1, x2, 0.05)
def _test_against_mc(self, apply_fn, init_fn, k, x1, x2, tol=0.01, n=256):
kernel_fn_mc = nt.monte_carlo_kernel_fn(
init_fn, apply_fn, random.PRNGKey(2), n_samples=n, device_count=0,
implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
vmap_axes=0
)
k_mc = kernel_fn_mc(x1, x2, 'nngp')
test_utils.assert_close_matrices(self, k_mc, k, tol)
class IndexTest(test_utils.NeuralTangentsTestCase):
  """Tests `stax.Index` against Monte Carlo estimates and error conditions."""

  @test_utils.product(
      same_inputs=[
          True,
          False
      ],
      get=[
          'nngp',
          'ntk',
          'cov1',
          'cov2',
      ],
      index_layer=[
          0,
          1,
          2,
          3
      ],
      mask_constant=[
          None,
          10.
      ],
      idx=[
          stax.Slice[0],
          stax.Slice[-1],
          stax.Slice[:],
          stax.Slice[:, 0],
          stax.Slice[:, -1],
          stax.Slice[:, -3:],
          stax.Slice[:, :],
          stax.Slice[::2],
          stax.Slice[...],
          stax.Slice[0, ...],
          stax.Slice[1:2, ...],
          stax.Slice[0:2, ...],
          stax.Slice[:, ::-2, ...],
          stax.Slice[::2, ::-2, 0, ...],
          stax.Slice[..., 1],
          stax.Slice[..., :2],
          stax.Slice[::2, 1, ...],
          stax.Slice[:, 1, -1, :],
          stax.Slice[..., 1::2],
          stax.Slice[:3, 1, 2],
          stax.Slice[:2, :2, :2],
          stax.Slice[..., ::2],
          stax.Slice[1:2:-1, 1, 2],
          stax.Slice[:, 0, :],
      ],
      readout=[
          stax.GlobalAvgPool,
          stax.Flatten,
      ]
  )
  def test_index(
      self,
      same_inputs,
      get,
      index_layer,
      mask_constant,
      idx,
      readout,
  ):
    """Compares `stax.Index` inserted at `index_layer` against Monte Carlo."""
    if index_layer == 3 and isinstance(idx, tuple) and len(idx) > 2:
      raise absltest.SkipTest(f'Readout outputs have only 2 dimensions, but '
                              f'the index has {len(idx)}.')
    if get == 'cov2' and same_inputs:
      raise absltest.SkipTest('cov2 is None when x2 is None.')
    width = 2**7
    n_samples = 2**7
    tol = 0.05
    key1, key2, key_mc = random.split(random.PRNGKey(1), 3)
    x1 = np.cos(random.normal(key1, [6, 3, 4, 5]))
    if mask_constant is not None:
      mask1 = random.bernoulli(key1, p=0.2, shape=x1.shape)
      x1 = np.where(mask1, mask_constant, x1)
    if same_inputs:
      x2 = None
    else:
      x2 = np.cos(random.normal(key2, [7, 3, 4, 5]))
      if mask_constant is not None:
        mask2 = random.bernoulli(key2, p=0.1, shape=x2.shape)
        x2 = np.where(mask2, mask_constant, x2)
    # Expand `idx` into a full per-axis tuple of indices.
    canonical_idx = utils.canonicalize_idx(
        idx=idx,
        ndim=x1.ndim if index_layer != 3 else 2
    )
    filter_shape = (2, 3)
    if index_layer == 0:
      # When indexing precedes the network, drop one filter dimension for
      # every spatial axis removed by an integer index.
      for i, s in enumerate(canonical_idx):
        if isinstance(s, int) and i in (1, 2):
          filter_shape = filter_shape[:-1]
    layers = [
        stax.Conv(width, filter_shape, padding='SAME'),
        stax.Relu(),
        readout(),
        stax.Dense(1 if get == 'ntk' else width)
    ]
    layers.insert(index_layer, stax.Index(idx=idx))
    init_fn, apply_fn, kernel_fn = stax.serial(*layers)
    def get_exact():
      return kernel_fn(x1, x2, get, mask_constant=mask_constant)
    if isinstance(canonical_idx[0], int) or canonical_idx[-1] != slice(None):
      # Unsupported integer indexing into batch axis, or any indexing into
      # the channel axis.
      self.assertRaises(NotImplementedError, get_exact)
    else:
      exact = get_exact()
      if get in ('cov1', 'cov2'):
        # `cov1` / `cov2` are checked against the batch-diagonal of the MC
        # NNGP on the respective input.
        diagonal_axes = (0,)
        get_e = 'nngp'
        if get == 'cov1':
          x1_e, x2_e = x1, None
        elif get == 'cov2':
          x1_e, x2_e = x2, None
      else:
        diagonal_axes = ()
        x1_e, x2_e = x1, x2
        get_e = get
      kernel_fn_mc = nt.monte_carlo_kernel_fn(
          init_fn=init_fn,
          apply_fn=stax.unmask_fn(apply_fn),
          key=key_mc,
          n_samples=n_samples,
          device_count=0,
          diagonal_axes=diagonal_axes,
          implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
      )
      empirical = kernel_fn_mc(x1_e, x2_e, get_e, mask_constant=mask_constant)
      test_utils.assert_close_matrices(self, empirical, exact, tol)
# Run the test suite via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 58,065 | 30.782157 | 80 | py |
neural-tangents | neural-tangents-main/tests/stax/requirements_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/_src/stax/requirements.py`."""
import itertools
import random as prandom
from absl.testing import absltest
from jax import default_backend
from jax import jit
from jax import random
from jax.config import config
import jax.numpy as np
import neural_tangents as nt
from neural_tangents import stax
from neural_tangents._src.empirical import _DEFAULT_TESTING_NTK_IMPLEMENTATION
from tests import test_utils
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
test_utils.update_test_tolerance()
prandom.seed(1)
@test_utils.product(
    same_inputs=[False, True],
    readout=[
        stax.Flatten(),
        stax.GlobalAvgPool(),
        stax.Identity()
    ],
    readin=[
        stax.Flatten(),
        stax.GlobalAvgPool(),
        stax.Identity()
    ]
)
class DiagonalTest(test_utils.NeuralTangentsTestCase):
  """Tests the `diagonal_batch` / `diagonal_spatial` kernel requirements."""

  def _get_kernel_fn(self, same_inputs, readin, readout):
    """Builds a 2-layer CNN sandwiched between `readin` and `readout`."""
    key = random.PRNGKey(1)
    x1 = random.normal(key, (2, 5, 6, 3))
    x2 = None if same_inputs else random.normal(key, (3, 5, 6, 3))
    layers = [readin]
    # After a non-`Identity` readin there are no spatial axes left, so use
    # an empty filter shape.
    filter_shape = (2, 3) if readin[0].__name__ == 'Identity' else ()
    layers += [stax.Conv(1, filter_shape, padding='SAME'),
               stax.Relu(),
               stax.Conv(1, filter_shape, padding='SAME'),
               stax.Erf(),
               readout]
    _, _, kernel_fn = stax.serial(*layers)
    return kernel_fn, x1, x2

  def test_diagonal_batch(self, same_inputs, readin, readout):
    """`diagonal_batch=False` must produce full `cov1` / `cov2`."""
    kernel_fn, x1, x2 = self._get_kernel_fn(same_inputs, readin, readout)
    K = kernel_fn(x1, x2)
    K_full = kernel_fn(x1, x2, diagonal_batch=False)
    if same_inputs:
      self.assertAllClose(K_full.cov1, K.nngp)
      self.assertAllClose(K_full.cov2, K.cov2)
    else:
      self.assertAllClose(K_full.cov1, kernel_fn(x1, None).nngp)
      self.assertAllClose(K_full.cov2, kernel_fn(x2, None).nngp)
    # Apart from `cov1` / `cov2`, the two kernels must agree exactly.
    K_full = K_full.replace(cov1=K.cov1, cov2=K.cov2,
                            diagonal_batch=K.diagonal_batch)
    self.assertAllClose(K_full, K)

  def test_diagonal_spatial(self, same_inputs, readin, readout):
    """`diagonal_spatial` must only affect the kernel's spatial shape."""
    kernel_fn, x1, x2 = self._get_kernel_fn(same_inputs, readin, readout)
    K = kernel_fn(x1, x2)
    K_full = kernel_fn(x1, x2, diagonal_spatial=False)
    batch_shape = x1.shape[0], (x1 if x2 is None else x2).shape[0]
    names = readout[0].__name__, readin[0].__name__
    if 'GlobalAvgPool' in names:
      if (readout[0].__name__ == 'GlobalAvgPool' and
          readin[0].__name__ == 'Identity'):
        # Pooling on top of spatial data requires the full spatial
        # covariance, hence requesting the diagonal must raise.
        self.assertRaises(ValueError, kernel_fn, x1, x2, diagonal_spatial=True)
      self.assertEqual(K_full.nngp.shape, batch_shape)
      self.assertAllClose(K_full, K)
    else:
      K_diag = kernel_fn(x1, x2, diagonal_spatial=True)
      if 'Flatten' in names:
        self.assertEqual(K_diag.nngp.shape, batch_shape)
        self.assertAllClose(K_diag, K)
        self.assertAllClose(K_diag, K_full)
      else:
        self.assertEqual(K_diag.nngp.shape, batch_shape + x1.shape[1:-1])
        self.assertAllClose(K_full, K)
        self.assertAllClose(K_diag.nngp, np.einsum('...iijj->...ij', K.nngp))
class DiagonalClassTest(test_utils.NeuralTangentsTestCase):
  """Tests algebraic properties of the `stax.Diagonal` requirement class."""

  def test_diagonal_compose_is_associative(self):
    """`>>` / `<<` composition of `Diagonal`s is associative and consistent."""
    # Iterate over all combinations of (input, output) flags for 3 layers;
    # unpacking order matches the original nested-loop traversal.
    for inp_a, inp_b, inp_c, out_a, out_b, out_c in itertools.product(
        stax.Bool, repeat=6):
      a = stax.Diagonal(inp_a, out_a)
      b = stax.Diagonal(inp_b, out_b)
      c = stax.Diagonal(inp_c, out_c)
      with self.subTest(a=a, b=b, c=c):
        # Forward composition is associative.
        left_assoc = (a >> b) >> c
        right_assoc = a >> (b >> c)
        self.assertEqual(left_assoc, right_assoc)
        # Reverse composition is associative too.
        rev_left = c << (b << a)
        rev_right = (c << b) << a
        self.assertEqual(rev_left, rev_right)
        # `>>` and `<<` agree with each other.
        self.assertEqual(left_assoc, rev_left)
@test_utils.product(
    same_inputs=[True, False]
)
class InputReqTest(test_utils.NeuralTangentsTestCase):
  """Tests propagation and validation of layer input requirements.

  Mismatched `dimension_numbers` between consecutive layers must raise a
  `ValueError`; consistent (but non-standard) dimension numbers must agree
  with Monte Carlo estimates.
  """

  def test_input_req(self, same_inputs):
    test_utils.skip_test(self)
    key = random.PRNGKey(1)
    x1 = random.normal(key, (2, 7, 8, 4, 3))
    x2 = None if same_inputs else random.normal(key, (4, 7, 8, 4, 3))
    # Output spec of the first conv disagrees with input spec of the second.
    _, _, wrong_conv_fn = stax.serial(
        stax.Conv(out_chan=1, filter_shape=(1, 2, 3),
                  dimension_numbers=('NDHWC', 'HDWIO', 'NCDWH')),
        stax.Relu(),
        stax.Conv(out_chan=1, filter_shape=(1, 2, 3),
                  dimension_numbers=('NHDWC', 'HWDIO', 'NCWHD'))
    )
    with self.assertRaises(ValueError):
      wrong_conv_fn(x1, x2)
    # Consistent non-standard dimension numbers - must match Monte Carlo.
    init_fn, apply_fn, correct_conv_fn = stax.serial(
        stax.Conv(out_chan=1024, filter_shape=(1, 2, 3),
                  dimension_numbers=('NHWDC', 'DHWIO', 'NCWDH')),
        stax.Relu(),
        stax.Conv(out_chan=1024, filter_shape=(1, 2, 3),
                  dimension_numbers=('NCHDW', 'WHDIO', 'NCDWH')),
        stax.Flatten(),
        stax.Dense(1024)
    )
    correct_conv_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=400,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    K = correct_conv_fn(x1, x2, get='nngp')
    K_mc = correct_conv_fn_mc(x1, x2, get='nngp')
    self.assertAllClose(K, K_mc, atol=0.01, rtol=0.05)
    # Pooling over an axis that the preceding conv treats as channels.
    _, _, wrong_conv_fn = stax.serial(
        stax.Conv(out_chan=1, filter_shape=(1, 2, 3),
                  dimension_numbers=('NDHWC', 'HDWIO', 'NCDWH')),
        stax.GlobalAvgPool(channel_axis=2)
    )
    with self.assertRaises(ValueError):
      wrong_conv_fn(x1, x2)
    # Consistent custom batch/channel axes through pooling layers.
    init_fn, apply_fn, correct_conv_fn = stax.serial(
        stax.Conv(out_chan=1024, filter_shape=(1, 2, 3),
                  dimension_numbers=('NHDWC', 'DHWIO', 'NDWCH')),
        stax.Relu(),
        stax.AvgPool((2, 1, 3), batch_axis=0, channel_axis=-2),
        stax.Conv(out_chan=1024, filter_shape=(1, 2, 3),
                  dimension_numbers=('NDHCW', 'IHWDO', 'NDCHW')),
        stax.Relu(),
        stax.GlobalAvgPool(channel_axis=2),
        stax.Dense(1024)
    )
    correct_conv_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=300,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    K = correct_conv_fn(x1, x2, get='nngp')
    K_mc = correct_conv_fn_mc(x1, x2, get='nngp')
    self.assertAllClose(K, K_mc, atol=0.01, rtol=0.05)
    # A conv after `Flatten` with transposed batch/channel axes must raise.
    _, _, wrong_conv_fn = stax.serial(
        stax.Flatten(),
        stax.Dense(1),
        stax.Erf(),
        stax.Conv(out_chan=1, filter_shape=(1, 2),
                  dimension_numbers=('CN', 'IO', 'NC')),
    )
    with self.assertRaises(ValueError):
      wrong_conv_fn(x1, x2)
    # A 0-dimensional conv after `Flatten` is valid.
    init_fn, apply_fn, correct_conv_fn = stax.serial(
        stax.Flatten(),
        stax.Conv(out_chan=1024, filter_shape=()),
        stax.Relu(),
        stax.Dense(1)
    )
    correct_conv_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=200,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=0
    )
    K = correct_conv_fn(x1, x2, get='ntk')
    K_mc = correct_conv_fn_mc(x1, x2, get='ntk')
    self.assertAllClose(K, K_mc, atol=0.01, rtol=0.05)
class MaskingTest(test_utils.NeuralTangentsTestCase):
  """Tests closed-form kernels of masked inputs against Monte Carlo."""

  @test_utils.product(
      same_inputs=[False],
      get=['ntk'],
      concat=[None, 0, 1],
      p=[0.5],
      mask_axis=[
          (),
          (0,),
          (1, 3)
      ],
      mask_constant=[10.]
  )
  def test_mask_fc(self, same_inputs, get, concat, p, mask_axis, mask_constant):
    """Masking through a fully-connected fan-out/fan-in architecture."""
    width = 512
    n_samples = 128
    tol = 0.04
    key = random.PRNGKey(1)
    # Entries equal to `mask_constant` along `mask_axis` (fraction `p`) are
    # treated as missing.
    x1 = random.normal(key, (4, 6, 5, 7))
    x1 = test_utils.mask(x1, mask_constant, mask_axis, key, p)
    if same_inputs:
      x2 = None
    else:
      x2 = random.normal(key, (2, 6, 5, 7))
      x2 = test_utils.mask(x2, mask_constant, mask_axis, key, p)
    nn = stax.serial(
        stax.Flatten(),
        stax.FanOut(3),
        stax.parallel(
            stax.serial(
                stax.Dense(width, 1., 0.1),
                stax.Abs(),
                stax.DotGeneral(lhs=-0.2),
                stax.Dense(width, 1.5, 0.01),
            ),
            stax.serial(
                stax.Dense(width, 1.1, 0.1),
                stax.DotGeneral(rhs=0.7),
                stax.Erf(),
                stax.Dense(width if concat != 1 else 512, 1.5, 0.1),
            ),
            stax.serial(
                stax.DotGeneral(rhs=0.5),
                stax.Dense(width, 1.2),
                stax.ABRelu(-0.2, 0.4),
                stax.Dense(width if concat != 1 else 1024, 1.3, 0.2),
            )
        ),
        (stax.FanInSum() if concat is None else stax.FanInConcat(concat)),
        stax.Dense(width, 2., 0.01),
        stax.Relu()
    )
    if get == 'nngp':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(width, 1., 0.1))
    elif get == 'ntk':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(1, 1., 0.1))
    else:
      raise ValueError(get)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=n_samples,
        # Concatenation along the batch axis precludes batching / vmapping.
        device_count=0 if concat in (0, -2) else -1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=None if concat in (0, -2) else 0,
    )
    kernel_fn = jit(kernel_fn, static_argnames='get')
    exact = kernel_fn(x1, x2, get, mask_constant=mask_constant)
    empirical = kernel_fn_mc(x1, x2, get=get, mask_constant=mask_constant)
    test_utils.assert_close_matrices(self, empirical, exact, tol)

  @test_utils.product(
      proj=['flatten', 'avg'],
      same_inputs=[False],
      get=['ntk'],
      n=[0, 1],
      concat=[None, 0, 1],
      mask_constant=[10.],
      p=[0.5],
      transpose=[True, False],
      mask_axis=[(), (0,), (0, 1, 2, 3)]
  )
  def test_mask_conv(
      self,
      same_inputs,
      get,
      mask_axis,
      mask_constant,
      concat,
      proj,
      p,
      n,
      transpose
  ):
    """Masking through `n`-D (transposed) convolutional architectures."""
    if isinstance(concat, int) and concat > n:
      raise absltest.SkipTest('Concatenation axis out of bounds.')
    test_utils.skip_test(self)
    if default_backend() == 'gpu' and n > 3:
      raise absltest.SkipTest('>=4D-CNN is not supported on GPUs.')
    width = 256
    n_samples = 256
    tol = 0.03
    key = random.PRNGKey(1)
    # Spatial / filter shapes are truncated to the requested dimension `n`.
    spatial_shape = ((1, 2, 3, 2, 1) if transpose else (15, 8, 9))[:n]
    filter_shape = ((2, 3, 1, 2, 1) if transpose else (7, 2, 3))[:n]
    strides = (2, 1, 3, 2, 3)[:n]
    spatial_spec = 'HWDZX'[:n]
    dimension_numbers = ('N' + spatial_spec + 'C',
                         'OI' + spatial_spec,
                         'N' + spatial_spec + 'C')
    x1 = np.cos(random.normal(key, (2,) + spatial_shape + (2,)))
    x1 = test_utils.mask(x1, mask_constant, mask_axis, key, p)
    if same_inputs:
      x2 = None
    else:
      x2 = np.cos(random.normal(key, (4,) + spatial_shape + (2,)))
      x2 = test_utils.mask(x2, mask_constant, mask_axis, key, p)
    def get_attn():
      # Attention is only exercised with the pooling readout.
      return stax.GlobalSelfAttention(
          n_chan_out=width,
          n_chan_key=width,
          n_chan_val=int(np.round(float(width) / int(np.sqrt(width)))),
          n_heads=int(np.sqrt(width)),
      ) if proj == 'avg' else stax.Identity()
    conv = stax.ConvTranspose if transpose else stax.Conv
    nn = stax.serial(
        stax.FanOut(3),
        stax.parallel(
            stax.serial(
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='CIRCULAR',
                    W_std=1.5,
                    b_std=0.2),
                stax.LayerNorm(axis=(1, -1)),
                stax.Abs(),
                stax.DotGeneral(rhs=0.9),
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='VALID',
                    W_std=1.2,
                    b_std=0.1),
            ),
            stax.serial(
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='SAME',
                    W_std=0.1,
                    b_std=0.3),
                stax.Relu(),
                stax.Dropout(0.7),
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='VALID',
                    W_std=0.9,
                    b_std=1.),
            ),
            stax.serial(
                get_attn(),
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='CIRCULAR',
                    W_std=1.,
                    b_std=0.1),
                stax.Erf(),
                stax.Dropout(0.2),
                stax.DotGeneral(rhs=0.7),
                conv(
                    dimension_numbers=dimension_numbers,
                    out_chan=width,
                    strides=strides,
                    filter_shape=filter_shape,
                    padding='VALID',
                    W_std=1.,
                    b_std=0.1),
            )
        ),
        (stax.FanInSum() if concat is None else stax.FanInConcat(concat)),
        get_attn(),
        {
            'avg': stax.GlobalAvgPool(),
            'sum': stax.GlobalSumPool(),
            'flatten': stax.Flatten(),
        }[proj],
    )
    if get == 'nngp':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(width, 1., 0.))
    elif get == 'ntk':
      init_fn, apply_fn, kernel_fn = stax.serial(nn, stax.Dense(1, 1., 0.))
    else:
      raise ValueError(get)
    kernel_fn_mc = nt.monte_carlo_kernel_fn(
        init_fn=init_fn,
        apply_fn=stax.unmask_fn(apply_fn),
        key=key,
        n_samples=n_samples,
        # Concatenation along the batch axis precludes batching / vmapping.
        device_count=0 if concat in (0, -n) else -1,
        implementation=_DEFAULT_TESTING_NTK_IMPLEMENTATION,
        vmap_axes=None if concat in (0, -n) else 0,
    )
    kernel_fn = jit(kernel_fn, static_argnames='get')
    exact = kernel_fn(x1, x2, get, mask_constant=mask_constant)
    empirical = kernel_fn_mc(x1, x2, get=get, mask_constant=mask_constant)
    test_utils.assert_close_matrices(self, empirical, exact, tol)
# Run the test suite via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 15,503 | 31.099379 | 80 | py |
neural-tangents | neural-tangents-main/tests/experimental/empirical_tf_test.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `experimental/empirical_tf/empirical.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as np
import neural_tangents as nt
from neural_tangents import experimental
import numpy as onp
import tensorflow as tf
tf.random.set_seed(1)
# TF module copied from https://www.tensorflow.org/api_docs/python/tf/Module
class _Dense(tf.Module):
  """A single fully-connected layer with ReLU activation.

  Weights are drawn from a standard normal at construction time; the
  forward pass rescales pre-activations by `1 / sqrt(fan_in)`.
  """

  def __init__(self, input_dim, output_size, name=None):
    super().__init__(name=name)
    # Keep variable creation order (w, then b) - it determines RNG draws.
    self.w = tf.Variable(
        tf.random.normal([input_dim, output_size]), name='w')
    self.b = tf.Variable(tf.zeros([1, output_size]), name='b')

  def __call__(self, x):
    fan_in = x.shape[-1]
    preactivation = tf.matmul(x, self.w) / fan_in**0.5 + self.b
    return tf.nn.relu(preactivation)
class _MLP(tf.Module):
  """A sequential stack of `_Dense` layers."""

  def __init__(self, input_size, sizes, name=None):
    super().__init__(name=name)
    # `input_shape` mimics the Keras attribute read by the tests.
    self.input_shape = (None, input_size)
    self.layers = []
    with self.name_scope:
      in_dim = input_size
      for out_dim in sizes:
        self.layers.append(_Dense(input_dim=in_dim, output_size=out_dim))
        in_dim = out_dim

  @tf.Module.with_name_scope
  def __call__(self, x):
    activations = x
    for layer in self.layers:
      activations = layer(activations)
    return activations
# Functions to compare TF/JAX manually.
# Signature of `_f1`..`_f4`: `params` has static shape (1, 2, 1, 4)
# (read back via `_input_signature[0].shape` below), `x` has a dynamic
# batch dimension.
_input_signature = [tf.TensorSpec((1, 2, 1, 4)),
                    tf.TensorSpec((None, 2, 3, 1))]
def _f1(params, x):
return x * tf.reduce_mean(params**2) + 1.
def _f1_jax(params, x):
return x * np.mean(params**2) + 1.
def _f2(params, x):
return tf.reduce_mean(x) * params**2 + 1.
def _f2_jax(params, x):
return np.mean(x) * params**2 + 1.
def _f3(params, x):
return _f1(params, _f1(params, x)) + tf.reduce_mean(_f2(params, x))
def _f3_jax(params, x):
return _f1_jax(params, _f1_jax(params, x)) + np.mean(_f2_jax(params, x))
def _f4(params, x):
return _f1(params, x) + tf.reduce_mean(_f2(params, _f3(params, x)))
def _f4_jax(params, x):
return _f1_jax(params, x) + np.mean(_f2_jax(params, _f3_jax(params, x)))
# ResNet18 adapted from
# https://github.com/jimmyyhwu/resnet18-tf2/blob/master/resnet.py
# He-normal ("Kaiming") initializer shared by all ResNet convolutions.
_kaiming_normal = tf.keras.initializers.VarianceScaling(
    scale=2.0, mode='fan_out', distribution='untruncated_normal')


def _conv3x3(x, out_planes, stride=1, name=None):
  """Applies a bias-free 3x3 convolution with explicit 1-pixel zero padding."""
  x = tf.keras.layers.ZeroPadding2D(padding=1, name=f'{name}_pad')(x)
  return tf.keras.layers.Conv2D(
      filters=out_planes, kernel_size=3, strides=stride, use_bias=False,
      kernel_initializer=_kaiming_normal, name=name)(x)
def _basic_block(x, planes, stride=1, downsample=None, name=None):
  """ResNet basic block: two 3x3 conv+BN stages plus a skip connection.

  Args:
    x: input tensor.
    planes: number of output channels of both convolutions.
    stride: stride of the first convolution.
    downsample: optional list of layers applied to the identity branch so
      its shape matches the residual branch.
    name: prefix for layer names.

  Returns:
    The output tensor of the block.
  """
  identity = x
  out = _conv3x3(x, planes, stride=stride, name=f'{name}.conv1')
  out = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                           name=f'{name}.bn1')(out)
  out = tf.keras.layers.ReLU(name=f'{name}.relu1')(out)
  out = _conv3x3(out, planes, name=f'{name}.conv2')
  out = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                           name=f'{name}.bn2')(out)
  if downsample is not None:
    for layer in downsample:
      identity = layer(identity)
  out = tf.keras.layers.Add(name=f'{name}.add')([identity, out])
  out = tf.keras.layers.ReLU(name=f'{name}.relu2')(out)
  return out
def _make_layer(x, planes, blocks, stride=1, name=None):
  """Stacks `blocks` basic blocks, downsampling the skip branch if needed."""
  downsample = None
  inplanes = x.shape[3]
  # When spatial resolution or channel count changes, align the identity
  # branch with a strided 1x1 conv + BN.
  if stride != 1 or inplanes != planes:
    downsample = [
        tf.keras.layers.Conv2D(
            filters=planes, kernel_size=1, strides=stride,
            use_bias=False, kernel_initializer=_kaiming_normal,
            name=f'{name}.0.downsample.0'),
        tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                           name=f'{name}.0.downsample.1'),
    ]
  x = _basic_block(x, planes, stride, downsample, name=f'{name}.0')
  for i in range(1, blocks):
    x = _basic_block(x, planes, name=f'{name}.{i}')
  return x
def _resnet(x, blocks_per_layer, classes, filters):
  """Builds a truncated ResNet graph on top of `x`.

  NOTE: although `blocks_per_layer` describes four layer groups, only
  `blocks_per_layer[0]` (`layer1`) is instantiated here, keeping the test
  network small.
  """
  x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(x)
  x = tf.keras.layers.Conv2D(
      filters=filters, kernel_size=7, strides=2, use_bias=False,
      kernel_initializer=_kaiming_normal, name='conv1')(x)
  x = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                         name='bn1')(x)
  x = tf.keras.layers.ReLU(name='relu1')(x)
  x = tf.keras.layers.ZeroPadding2D(padding=1, name='maxpool_pad')(x)
  x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, name='maxpool')(x)
  x = _make_layer(x, filters, blocks_per_layer[0], name='layer1')
  x = tf.keras.layers.GlobalAveragePooling2D(name='avgpool')(x)
  # Final classifier uses the uniform fan-in initialization of `torch.nn.Linear`.
  initializer = tf.keras.initializers.RandomUniform(-1.0 / (2 * filters)**0.5,
                                                    1.0 / (2 * filters)**0.5)
  x = tf.keras.layers.Dense(units=classes, kernel_initializer=initializer,
                            bias_initializer=initializer, name='fc')(x)
  return x
def _MiniResNet(classes, input_shape, weights):
  """Builds a tiny functional-API ResNet model.

  `weights` is unused (kept, presumably, to match the
  `tf.keras.applications` constructor signature - confirm before relying
  on it).
  """
  inputs = tf.keras.Input(shape=input_shape)
  outputs = _resnet(inputs, [1, 1, 1, 1], classes=classes, filters=2)
  return tf.keras.Model(inputs=inputs, outputs=outputs)
class EmpiricalTfTest(parameterized.TestCase):
  """Compares TF empirical NTKs across implementations and against JAX."""

  def _compare_ntks(
      self,
      f,
      f_jax,
      params,
      trace_axes,
      diagonal_axes,
      vmap_axes
  ):
    """Computes NTKs of `f` with all implementations and cross-checks them.

    Every pair of TF implementations must agree, and each TF result must
    agree with the pure-JAX computation on `f_jax` (without `jax2tf`).
    """
    if any(i == j for i in trace_axes for j in diagonal_axes):
      raise absltest.SkipTest('Overlapping trace and diagonal axes.')
    kwargs = dict(
        trace_axes=trace_axes,
        diagonal_axes=diagonal_axes,
    )
    jax_ntk_fns = [
        jax.jit(nt.empirical_ntk_fn(
            **kwargs, f=f_jax, implementation=i, vmap_axes=v))
        for i in nt.NtkImplementation
        for v in vmap_axes if v not in trace_axes + diagonal_axes
    ]
    ntk_fns = [
        experimental.empirical_ntk_fn_tf(**kwargs,
                                         f=f,
                                         implementation=i,
                                         vmap_axes=v)
        for i in nt.NtkImplementation
        for v in vmap_axes if v not in trace_axes + diagonal_axes
    ]
    # `tf.Module`s expose `input_shape`; `tf.function`s expose
    # `input_signature` instead.
    x_shape = (f.input_shape[1:] if isinstance(f, tf.Module) else
               f.input_signature[1].shape[1:])
    x1 = tf.random.normal((2,) + x_shape, seed=2) / onp.prod(x_shape)**0.5
    x2 = tf.random.normal((3,) + x_shape, seed=3) / onp.prod(x_shape)**0.5
    x1_jax = np.array(x1)
    x2_jax = np.array(x2)
    params_jax = jax.tree_map(lambda x: np.array(x), params)
    jax_ntks = [ntk_fn_i(x1_jax, x2_jax, params_jax)
                for ntk_fn_i in jax_ntk_fns]
    ntks = list(enumerate([ntk_fn_i(x1, x2, params)
                           for ntk_fn_i in ntk_fns]))
    if len(tf.config.list_physical_devices()) > 1:  # TPU
      atol = 0.
      rtol = 5e-3
      atol_jax = 0.4
      rtol_jax = 0.15  # TODO(romann): revisit poor TPU agreement.
    else:
      atol = 1e-5
      rtol = 1e-4
      atol_jax = 0.
      rtol_jax = 5e-5
    for i1, ntk1 in ntks:
      for i2, ntk2 in ntks[i1 + 1:]:
        # Compare different implementation
        onp.testing.assert_allclose(ntk1, ntk2, rtol=rtol, atol=atol)
      # Compare against the JAX version (without calling `jax2tf`).
      onp.testing.assert_allclose(ntk1, jax_ntks[i1], rtol=rtol_jax,
                                  atol=atol_jax)

  @parameterized.product(
      f=[
          _MiniResNet,
          # # TODO(romann): MobileNet works, but takes too long to compile.
          # tf.keras.applications.MobileNet,
      ],
      input_shape=[
          (32, 32, 3)
      ],
      trace_axes=[
          (),
          (1,)
      ],
      diagonal_axes=[
          (),
          (1,)
      ],
      vmap_axes=[
          (0, None)
      ]
  )
  def test_keras_functional(
      self,
      f,
      input_shape,
      trace_axes,
      diagonal_axes,
      vmap_axes,
  ):
    """NTK agreement for a Keras functional-API model."""
    f = f(classes=1, input_shape=input_shape, weights=None)
    f.build((None, *input_shape))
    f_jax, params = experimental.get_apply_fn_and_params(f)
    self._compare_ntks(f, f_jax, params, trace_axes, diagonal_axes, vmap_axes)

  @parameterized.product(
      input_shape=[
          (16, 16, 3)
      ],
      trace_axes=[
          (),
          (1,)
      ],
      diagonal_axes=[
          (),
          (1,)
      ],
      vmap_axes=[
          (0, None)
      ]
  )
  def test_keras_sequential(
      self,
      input_shape,
      trace_axes,
      diagonal_axes,
      vmap_axes,
  ):
    """NTK agreement for a Keras `Sequential` CNN."""
    f = tf.keras.Sequential()
    f.add(tf.keras.layers.Conv2D(4, (3, 3), activation='relu'))
    f.add(tf.keras.layers.Conv2D(2, (2, 2), activation='relu'))
    f.add(tf.keras.layers.Flatten())
    f.add(tf.keras.layers.Dense(2))
    f.build((None, *input_shape))
    f_jax, params = experimental.get_apply_fn_and_params(f)
    self._compare_ntks(f, f_jax, params, trace_axes, diagonal_axes, vmap_axes)

  @parameterized.product(
      f_f_jax=[
          (_f1, _f1_jax),
          (_f2, _f2_jax),
          (_f3, _f3_jax),
          (_f4, _f4_jax)
      ],
      params_shape=[
          _input_signature[0].shape
      ],
      trace_axes=[
          (),
          (1,)
      ],
      diagonal_axes=[
          (),
          (1,)
      ],
      vmap_axes=[
          (None,)
      ]
  )
  def test_tf_function(
      self,
      f_f_jax,
      params_shape,
      trace_axes,
      diagonal_axes,
      vmap_axes,
  ):
    """NTK agreement for plain `tf.function`s vs their JAX twins."""
    f, f_jax = f_f_jax
    f = tf.function(f, input_signature=_input_signature)
    params = tf.random.normal(params_shape, seed=4)
    self._compare_ntks(f, f_jax, params, trace_axes, diagonal_axes, vmap_axes)

  @parameterized.product(
      trace_axes=[
          (),
          (1,)
      ],
      diagonal_axes=[
          (),
          (1,)
      ],
      vmap_axes=[
          (0, None)
      ]
  )
  def test_tf_module(
      self,
      trace_axes,
      diagonal_axes,
      vmap_axes,
  ):
    """NTK agreement for a hand-written `tf.Module` MLP."""
    f = _MLP(input_size=5, sizes=[4, 6, 3], name='MLP')
    f_jax, params = experimental.get_apply_fn_and_params(f)
    self._compare_ntks(f, f_jax, params, trace_axes, diagonal_axes, vmap_axes)
# Run the test suite via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 10,792 | 26.96114 | 78 | py |
neural-tangents | neural-tangents-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""Readthedocs configuration."""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = u'Neural Tangents'
copyright = u'2021, Google LLC.'
author = u'The Neural Tangents Authors'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
]
# set_type_checking_flag = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The main toctree document.
main_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'jax': ('https://jax.readthedocs.io/en/latest/', None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NeuralTangentsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, 'NeuralTangents.tex', u'Neural Tangents',
u'The Neural Tangents Authors', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(main_doc, 'neuraltangents', u'Neural Tangents',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(main_doc,
'NeuralTangents',
u'Neural Tangents',
author,
'NeuralTangents',
'Neural Tangents: Fast and Easy Infinite Neural Networks in Python',
'Miscellaneous'),
]
# add_module_names = False
default_role = 'code'
def remove_module_docstring(app, what, name, obj, options, lines):
if what == 'module' and name == 'neural_tangents':
del lines[:]
def setup(app):
  """Sphinx extension entry point: register custom CSS and docstring filter."""
  app.add_css_file('style.css')
  app.connect('autodoc-process-docstring', remove_module_docstring)
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
autodoc_mock_imports = [
'frozendict', 'jax', 'absl', 'numpy', 'scipy', 'tensorflow', 'tf2jax']
| 6,268 | 27.889401 | 79 | py |
neural-tangents | neural-tangents-main/neural_tangents/stax.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Closed-form NNGP and NTK library.
This library contains layers mimicking those in
:obj:`jax.example_libraries.stax` with similar API apart from:
1) Instead of `(init_fn, apply_fn)` tuple, layers return a triple
`(init_fn, apply_fn, kernel_fn)`, where the added `kernel_fn` maps a
:class:`~neural_tangents.Kernel` to a new :class:`~neural_tangents.Kernel`, and
represents the change in the analytic NTK and NNGP kernels
(:attr:`~neural_tangents.Kernel.nngp`, :attr:`~neural_tangents.Kernel.ntk`).
These functions are chained / stacked together within the :obj:`serial` or
:obj:`parallel` combinators, similarly to `init_fn` and `apply_fn`.
For details, please see "`Neural Tangents: Fast and Easy Infinite Neural
Networks in Python <https://arxiv.org/abs/1912.02803>`_".
2) In layers with random weights, NTK parameterization is used by default
(see page 3 in
"`Neural Tangent Kernel: Convergence and Generalization in Neural Networks
<https://arxiv.org/abs/1806.07572>`_"). Standard parameterization can be
specified for :obj:`Conv` and :obj:`Dense` layers by a keyword argument
`parameterization`. For details, please see "`On the infinite width limit of
neural networks with a standard parameterization
<https://arxiv.org/abs/2001.07301>`_".
3) Some functionality may be missing (e.g.
:obj:`jax.example_libraries.stax.BatchNorm`), and some may be
present only in our library (e.g. :attr:`~Padding.CIRCULAR` padding,
:obj:`LayerNorm`, :obj:`GlobalAvgPool`, :obj:`GlobalSelfAttention`, flexible
batch and channel axes etc.).
Example:
>>> from jax import random
>>> import neural_tangents as nt
>>> from neural_tangents import stax
>>> #
>>> key1, key2 = random.split(random.PRNGKey(1), 2)
>>> x_train = random.normal(key1, (20, 32, 32, 3))
>>> y_train = random.uniform(key1, (20, 10))
>>> x_test = random.normal(key2, (5, 32, 32, 3))
>>> #
>>> init_fn, apply_fn, kernel_fn = stax.serial(
>>> stax.Conv(128, (3, 3)),
>>> stax.Relu(),
>>> stax.Conv(256, (3, 3)),
>>> stax.Relu(),
>>> stax.Conv(512, (3, 3)),
>>> stax.Flatten(),
>>> stax.Dense(10)
>>> )
>>> #
>>> predict_fn = nt.predict.gradient_descent_mse_ensemble(kernel_fn, x_train,
>>> y_train)
>>> #
>>> # (5, 10) np.ndarray NNGP test prediction
>>> y_test_nngp = predict_fn(x_test=x_test, get='nngp')
>>> #
>>> # (5, 10) np.ndarray NTK prediction
>>> y_test_ntk = predict_fn(x_test=x_test, get='ntk')
"""
# Layer combinators, combining multiple layers into a single layer.
from ._src.stax.combinators import (
parallel,
serial,
repeat
)
# Elementwise nonlinearities.
from ._src.stax.elementwise import (
ABRelu,
Abs,
Cos,
Elementwise,
ElementwiseNumerical,
Erf,
Exp,
ExpNormalized,
Gabor,
Gaussian,
Gelu,
Hermite,
LeakyRelu,
Monomial,
Polynomial,
Rbf,
RectifiedMonomial,
Relu,
Sigmoid_like,
Sign,
Sin,
)
# Linear layers.
from ._src.stax.linear import (
Aggregate,
AvgPool,
Conv,
ConvLocal,
ConvTranspose,
Dense,
Identity,
Index,
DotGeneral,
Dropout,
Flatten,
GlobalAvgPool,
GlobalSelfAttention,
GlobalSumPool,
ImageResize,
LayerNorm,
SumPool,
)
# Helper object for the `Index` layer.
from ._src.stax.linear import (
Slice
)
# Branching layers.
from ._src.stax.branching import (
FanInConcat,
FanInProd,
FanInSum,
FanOut,
)
# Enums to specify layer behavior.
from ._src.stax.linear import (
AggregateImplementation,
AttentionMechanism,
Padding,
PositionalEmbedding,
)
# Decorators and classes for constructing your own layers.
from ._src.stax.requirements import (
Bool,
Diagonal,
MaskedArray,
layer,
requires,
supports_masking,
unmask_fn,
)
| 4,458 | 25.861446 | 79 | py |
neural-tangents | neural-tangents-main/neural_tangents/__init__.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Neural Tangents modules and functions."""
__version__ = '0.6.3'
from . import experimental
from . import predict
from . import stax
from ._src.batching import batch
from ._src.empirical import empirical_kernel_fn
from ._src.empirical import empirical_nngp_fn
from ._src.empirical import empirical_ntk_fn
from ._src.empirical import empirical_ntk_vp_fn
from ._src.empirical import linearize
from ._src.empirical import NtkImplementation
from ._src.empirical import taylor_expand
from ._src.monte_carlo import monte_carlo_kernel_fn
from ._src.utils.kernel import Kernel
| 1,238 | 35.441176 | 80 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/empirical.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute empirical NNGP and NTK; approximate functions via Taylor series.
All functions in this module are applicable to any JAX functions of proper
signatures (not only those from :obj:`~neural_tangents.stax`).
NNGP and NTK are computed using :obj:`~neural_tangents.empirical_nngp_fn`,
:obj:`~neural_tangents.empirical_ntk_fn`, or
:obj:`~neural_tangents.empirical_kernel_fn` (for both). The kernels have a very
specific output shape convention that may be unexpected. Further, NTK has
multiple implementations that may perform differently depending on the task.
Please read individual functions' docstrings.
For details, please see "`Fast Finite Width Neural Tangent Kernel
<https://arxiv.org/abs/2206.08720>`_".
Example:
>>> from jax import random
>>> import neural_tangents as nt
>>> from neural_tangents import stax
>>> #
>>> key1, key2, key3 = random.split(random.PRNGKey(1), 3)
>>> x_train = random.normal(key1, (20, 32, 32, 3))
>>> y_train = random.uniform(key1, (20, 10))
>>> x_test = random.normal(key2, (5, 32, 32, 3))
>>> #
>>> # A narrow CNN.
>>> init_fn, f, _ = stax.serial(
>>> stax.Conv(32, (3, 3)),
>>> stax.Relu(),
>>> stax.Conv(32, (3, 3)),
>>> stax.Relu(),
>>> stax.Conv(32, (3, 3)),
>>> stax.Flatten(),
>>> stax.Dense(10)
>>> )
>>> #
>>> _, params = init_fn(key3, x_train.shape)
>>> #
>>> # Default setting: reducing over logits; pass `vmap_axes=0` because the
>>> # network is iid along the batch axis, no BatchNorm. Use default
>>> # `implementation=nt.NtkImplementation.JACOBIAN_CONTRACTION` (`1`).
>>> kernel_fn = nt.empirical_kernel_fn(
>>> f, trace_axes=(-1,), vmap_axes=0,
>>> implementation=nt.NtkImplementation.JACOBIAN_CONTRACTION)
>>> #
>>> # (5, 20) np.ndarray test-train NNGP/NTK
>>> nngp_test_train = kernel_fn(x_test, x_train, 'nngp', params)
>>> ntk_test_train = kernel_fn(x_test, x_train, 'ntk', params)
>>> #
>>> # Full kernel: not reducing over logits. Use structured derivatives
>>> # `implementation=nt.NtkImplementation.STRUCTURED_DERIVATIVES` (`3`) for
>>> # typically faster computation and lower memory cost.
>>> kernel_fn = nt.empirical_kernel_fn(
>>> f, trace_axes=(), vmap_axes=0,
>>> implementation=nt.NtkImplementation.STRUCTURED_DERIVATIVES)
>>> #
>>> # (5, 20, 10, 10) np.ndarray test-train NNGP/NTK namedtuple.
>>> k_test_train = kernel_fn(x_test, x_train, None, params)
>>> #
>>> # A wide FCN with lots of parameters and many (`100`) outputs.
>>> init_fn, f, _ = stax.serial(
>>> stax.Flatten(),
>>> stax.Dense(1024),
>>> stax.Relu(),
>>> stax.Dense(1024),
>>> stax.Relu(),
>>> stax.Dense(100)
>>> )
>>> #
>>> _, params = init_fn(key3, x_train.shape)
>>> #
>>> # Use ntk-vector products
>>> # (`implementation=nt.NtkImplementation.NTK_VECTOR_PRODUCTS`) since the
>>> # network has many parameters relative to the cost of forward pass,
>>> # large outputs.
>>> ntk_fn = nt.empirical_ntk_fn(
>>> f, vmap_axes=0,
>>> implementation=nt.NtkImplementation.NTK_VECTOR_PRODUCTS)
>>> #
>>> # (5, 5) np.ndarray test-test NTK
>>> ntk_test_test = ntk_fn(x_test, None, params)
>>> #
>>> # Compute only output variances:
>>> nngp_fn = nt.empirical_nngp_fn(f, diagonal_axes=(0,))
>>> #
>>> # (20,) np.ndarray train-train diagonal NNGP
>>> nngp_train_train_diag = nngp_fn(x_train, None, params)
"""
import enum
import functools
import operator
from typing import Callable, Dict, KeysView, List, Optional, Set, Tuple, TypeVar, Union, Iterable
import warnings
import jax
from jax import core, lax
from jax import eval_shape, jacobian, jvp, vjp, vmap
from jax import linear_transpose
from jax import linear_util as lu
from jax.core import Jaxpr, JaxprEqn, Literal, ShapedArray, Value, Var
from jax.interpreters import ad, xla
from jax.interpreters.ad import UndefinedPrimal, Zero
import jax.numpy as np
from jax.tree_util import tree_flatten, tree_map, tree_reduce, tree_structure, tree_transpose, tree_unflatten
from jax.util import safe_map as map, safe_zip as zip
import numpy as onp
from .utils import rules
from .utils import utils
from .utils.typing import ApplyFn, Axes, EmpiricalGetKernelFn, EmpiricalKernelFn, PyTree, VMapAxes, VMapAxisTriple
# LINEARIZATION AND TAYLOR EXPANSION
def linearize(f: ApplyFn, params: PyTree) -> ApplyFn:
  """Returns a function `f_lin`, the first order taylor approximation to `f`.

  Example:
    >>> # Compute the MSE of the first order Taylor series of a function.
    >>> f_lin = linearize(f, params)
    >>> mse = np.mean((f(new_params, x) - f_lin(new_params, x)) ** 2)

  Args:
    f:
      A function to linearize, with signature `f(params, *args, **kwargs)`
      where `params` is a `PyTree` and the output is a `PyTree`.
    params:
      The `PyTree` of parameters around which the Taylor series is taken.
      Any structure compatible with JAX tree operations works.

  Returns:
    A function `f_lin(new_params, *args, **kwargs)` with the same signature
    as `f`, implementing the first-order Taylor expansion of `f` about
    `params`.
  """
  def f_lin(p, *args, **kwargs):
    def f_at(param):
      return f(param, *args, **kwargs)

    # Tangent direction: displacement of `p` from the expansion point.
    direction = _sub(p, params)
    primal_out, tangent_out = jvp(f_at, (params,), (direction,))
    return _add(primal_out, tangent_out)

  return f_lin
def taylor_expand(f: ApplyFn, params: PyTree, degree: int) -> ApplyFn:
  """Returns a function `f_tayl`, Taylor approximation to `f` of order `degree`.

  Example:
    >>> # Compute the MSE of the third order Taylor series of a function.
    >>> f_tayl = taylor_expand(f, params, 3)
    >>> mse = np.mean((f(new_params, x) - f_tayl(new_params, x)) ** 2)

  Args:
    f:
      A function to Taylor-expand, with signature `f(params, *args, **kwargs)`
      where `params` is a `PyTree` and the output is a `PyTree`.
    params:
      The `PyTree` of parameters around which the Taylor series is taken.
      Any structure compatible with JAX tree operations works.
    degree:
      The order of the Taylor expansion.

  Returns:
    A function `f_tayl(new_params, *args, **kwargs)` with the same signature
    as `f`, implementing the `degree`-order Taylor series of `f` about
    `params`.
  """
  def _series(g, params, dparams, order):
    """Recursively accumulates Taylor terms of `g` from `order` up to `degree`."""
    if order == degree:
      return g(params)

    def g_jvp(p):
      # Directional derivative of `g` at `p` along `dparams`.
      return jvp(g, (p,), (dparams,))[1]

    # Higher-order remainder, divided by (order + 1) to build `1 / k!` factors.
    higher = _series(g_jvp, params, dparams, order + 1)
    return _add(g(params), _div(higher, order + 1))

  def f_tayl(p, *args, **kwargs):
    dparams = _sub(p, params)
    return _series(lambda param: f(param, *args, **kwargs),
                   params, dparams, 0)

  return f_tayl
# NNGP
def empirical_nngp_fn(
    f: ApplyFn,
    trace_axes: Axes = (-1,),
    diagonal_axes: Axes = ()
) -> EmpiricalKernelFn:
  """Returns a function to draw a single sample the NNGP of a given network `f`.

  The Neural Network Gaussian Process (NNGP) kernel is
  :math:`f(X_1) f(X_2)^T`, the outer product of the function outputs.

  .. warning::
    The resulting kernel shape is *nearly* `zip(f(x1).shape, f(x2).shape)`,
    modified by `trace_axes` and `diagonal_axes`. These arguments encode
    assumptions about the outputs `f(x)` that may hold only in the infinite
    width / infinite number of samples limit, or may not apply to your
    architecture. For most precise results in the context of linearized
    training dynamics of a specific finite-width network, set both
    `trace_axes=()` and `diagonal_axes=()` to obtain the kernel exactly of
    shape `zip(f(x1).shape, f(x2).shape)`.

  For networks with multiple (i.e. lists, tuples, PyTrees) outputs, the
  empirical kernels would in principle contain covariances between distinct
  outputs; these cross-terms are ignored here and each output is treated
  separately. Please raise an issue if this feature is important to you.

  Args:
    f:
      the function whose NNGP we are computing. Signature
      `f(params, x, **kwargs)` where `params` is a `PyTree`, `x` is a
      `PyTree`, and the output is a `PyTree`.
    trace_axes:
      output axes to trace the output kernel over, i.e. compute only the
      trace of the covariance along each respective pair of axes. This saves
      space and compute if you only need the trace, and improves
      approximation accuracy when covariance along these axes converges to a
      `constant * identity matrix` in the limit of interest (e.g. infinite
      width or infinite `n_samples`). A common use case is the channel /
      feature / logit axis, since activation slices along it are i.i.d.
      Related to "contracting dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
    diagonal_axes:
      output axes to diagonalize the output kernel over, i.e. compute only
      the diagonal of the covariance along each respective pair of axes.
      This saves space and compute when off-diagonal values are not needed,
      and improves accuracy if they are known to vanish in the limit of
      interest. If on-diagonal values additionally converge to the same
      constant, specify those axes in `trace_axes` instead for even more
      savings. A common use case is computing variances instead of
      covariances along certain axes.
      Related to "batch dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)

  Returns:
    A function to draw a single sample the NNGP of a given network `f`.
  """
  def nngp_fn(x1: PyTree,
              x2: Optional[PyTree],
              params: PyTree,
              **apply_fn_kwargs) -> PyTree:
    """Computes a single sample of the empirical NNGP.

    Args:
      x1:
        first batch of inputs.
      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.
      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`, split into two per-batch
        groups by the `split_kwargs` function; in particular any rng key is
        split into two different (if `x1!=x2`) or same (if `x1==x2`) keys.

    Returns:
      A single sample of the empirical NNGP, of shape "almost"
      `zip(f(x1).shape, f(x2).shape)` except that `trace_axes` are
      contracted away and `diagonal_axes` are present only once; all other
      axes are present twice.
    """
    kwargs1, kwargs2 = utils.split_kwargs(apply_fn_kwargs, x1, x2)
    fx1 = f(params, x1, **kwargs1)
    fx2 = fx1 if utils.all_none(x2) else f(params, x2, **kwargs2)

    def normalized_outer(o1: np.ndarray, o2: np.ndarray) -> np.ndarray:
      prod = _dot_general(o1, o2, trace_axes, diagonal_axes)
      # Average (not just sum) over the traced-out output axes.
      return prod / utils.size_at(o1, trace_axes)

    return tree_map(normalized_outer, fx1, fx2)

  return nngp_fn
# NTK
class NtkImplementation(enum.IntEnum):
  """Implementation method of the underlying finite width NTK computation.

  This is a short summary of each method; for details, see "`Fast Finite
  Width Neural Tangent Kernel <https://arxiv.org/abs/2206.08720>`_".

  Attributes:
    AUTO:
      (or `0`) evaluates FLOPs of all other methods at compilation time and
      picks the cheapest one. At the moment it only works correctly on TPUs;
      on CPU/GPU it can return wrong results, which is why it is not the
      default. TODO(romann): revisit based on http://b/202218145.
    JACOBIAN_CONTRACTION:
      (or `1`) computes the NTK as the outer product of two Jacobians, each
      obtained with reverse-mode Autodiff (vector-Jacobian products, VJPs).
      Under JIT the contraction happens layerwise, so full Jacobians need not
      be materialized simultaneously and memory use can be lower than holding
      both Jacobians. Best suited to networks with small outputs (e.g. scalar
      outputs for regression / binary classification rather than 1000
      ImageNet classes) and a forward pass that is expensive relative to the
      parameter count (e.g. CNNs, which reuse a small filter bank many
      times). It is also the most reliable method — the implementation is the
      simplest, and reverse-mode Autodiff is the most widely used and tested
      machinery — hence it is the default.
    NTK_VECTOR_PRODUCTS:
      (or `2`) computes the NTK as a sequence of NTK-vector products,
      analogously to building a Jacobian from Jacobian-vector products (JVPs)
      or vector-Jacobian products (VJPs). Using both forward (JVP) and
      reverse (VJP) Autodiff removes the Jacobian contraction at the cost of
      extra forward passes, so it is recommended for networks whose forward
      pass is cheap relative to the parameter count (e.g. fully-connected
      networks, where each parameter matrix is used once per pass) and for
      large outputs (e.g. 1000 ImageNet classes). Memory requirements match
      :attr:`JACOBIAN_CONTRACTION` (`1`). Reliance on forward-mode Autodiff
      makes it slightly more prone to JAX/XLA bugs than
      :attr:`JACOBIAN_CONTRACTION` (`1`), but it remains simple and reliable.
    STRUCTURED_DERIVATIVES:
      (or `3`) uses a custom JAX interpreter for a more efficient NTK. It
      walks the computational graph in the same order as reverse-mode
      Autodiff, but instead of VJPs it directly computes MJJMPs —
      "matrix-Jacobian-Jacobian-matrix" products — which arise in the NTK.
      Each MJJMP exploits structure in the Jacobians, hence the name. It can
      be dramatically faster (up to several orders of magnitude) on
      fully-connected networks, and usually faster or on-par for CNNs,
      Transformers, and other architectures, though the exact speedup (from
      none to 10X) is setting-dependent; it can also use less memory. In our
      experience it consistently outperforms the other methods, but its
      implementation is significantly more complex (hence bug-prone) and it
      does not yet support some exotic JAX primitives (e.g.
      :obj:`jax.checkpoint`, parallel collectives such as
      :obj:`jax.lax.psum`, compiled loops like :obj:`jax.lax.scan`, etc.),
      which is why it is highly recommended to try, but not the default yet.
  """
  AUTO = 0
  JACOBIAN_CONTRACTION = 1
  NTK_VECTOR_PRODUCTS = 2
  STRUCTURED_DERIVATIVES = 3
DEFAULT_NTK_IMPLEMENTATION: NtkImplementation = NtkImplementation.JACOBIAN_CONTRACTION
"""Default user-facing empirical NTK implementation.
We default to `JACOBIAN_CONTRACTION` since it's the most straightforward and
reliable method, virtually guaranteed to compute the correct result.
"""
_DEFAULT_TESTING_NTK_IMPLEMENTATION: NtkImplementation = NtkImplementation.STRUCTURED_DERIVATIVES
"""Default empirical NTK implementation used in `tests`.
We default to `STRUCTURED_DERIVATIVES` since it is the fastest but also most
complex method, hence benefiting from additional testing against infinite-width
results.
"""
_DEFAULT_NTK_J_RULES: bool = True
"""Says whether to use custom Jacobian rules in `STRUCTURED_DERIVATIVES` (`3`).
Useful for debugging and testing. Theoretically should be set to `True`, but if
some Jacobian rule is implemented suboptimally, trying out `False` could improve
performance.
"""
_DEFAULT_NTK_S_RULES: bool = True
"""Says whether to use structure rules in `STRUCTURED_DERIVATIVES` (`3`).
Useful for debugging and testing. In practice should be set to `True`, and
setting it to `False` can lead to dramatic deterioration of performance.
"""
_DEFAULT_NTK_FWD: Optional[bool] = None
"""Says whether to use forward mode in `STRUCTURED_DERIVATIVES` (`3`) Jacobians.
Useful for debugging and testing, but for best performance should be set to
`None`, i.e. to selecting forward or reverse mode AD automatically based on
input/output sizes.
"""
def _empirical_auto_ntk_fn(**kwargs) -> EmpiricalGetKernelFn:
  """Compute NTK by automatically selecting the best implementation.

  For each distinct signature of input / parameter / kwargs shapes, evaluates
  the compile-time FLOPs of every concrete :class:`NtkImplementation` and
  dispatches to the cheapest one. The winning implementation is memoized per
  shape signature, so the FLOPs analysis runs only once per distinct set of
  shapes.

  Returns wrong FLOPS on CPU and GPU when JITting.

  TODO(romann): revisit based on http://b/202218145.

  Args:
    **kwargs:
      forwarded to `empirical_ntk_fn` for each candidate implementation.

  Returns:
    A function computing a single sample of the empirical NTK.
  """
  cache = {}

  def ntk_fn(
      x1: PyTree,
      x2: Optional[PyTree],
      params: PyTree,
      **apply_fn_kwargs
  ) -> np.ndarray:
    """Computes a single sample of the automatic empirical NTK.

    Args:
      x1:
        first batch of inputs.
      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.
      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`, split into two per-batch
        groups by the `split_kwargs` function; in particular any rng key is
        split into two different (if `x1!=x2`) or same (if `x1==x2`) keys.

    Returns:
      A single sample of the empirical NTK. The shape of the kernel is
      "almost" `zip(f(x1).shape, f(x2).shape)` except that `trace_axes` are
      contracted away and `diagonal_axes` are present only once; all other
      axes are present twice.
    """
    # Memoization key: the full shape signature of all arguments.
    shapes = tree_map(np.shape, (x1, x2, params, apply_fn_kwargs))
    shapes = _to_tuple_tree(shapes)

    if shapes not in cache:
      best_ntk_fn = None
      best_flops = onp.inf
      for implementation in NtkImplementation:
        if implementation != NtkImplementation.AUTO:
          # NOTE: renamed from `ntk_fn` to avoid shadowing the enclosing
          # closure's own name, and removed a leftover debug `print` of
          # per-implementation FLOPs that polluted stdout on cache misses.
          candidate_fn = empirical_ntk_fn(**kwargs,
                                          implementation=implementation)
          flops = _get_flops(candidate_fn, True, x1, x2, params,
                             **apply_fn_kwargs)
          if flops < best_flops:
            best_flops = flops
            best_ntk_fn = candidate_fn

      if best_ntk_fn is None:
        raise ValueError('This should not happen.')
      cache[shapes] = best_ntk_fn

    return cache[shapes](x1, x2, params, **apply_fn_kwargs)

  return ntk_fn
def _jacobian_contraction_ntk_fn(
    f: ApplyFn,
    trace_axes: Axes,
    diagonal_axes: Axes,
    vmap_axes: VMapAxes,
    **kwargs
) -> EmpiricalKernelFn:
  """Compute NTK by directly instantiating Jacobians and contracting."""
  def sum_and_contract(fx, j1, j2):
    # Contracts two Jacobian PyTrees `j1`, `j2` (one leaf per parameter leaf)
    # into the NTK, summing contributions over all parameter leaves.
    ndim = fx.ndim
    size = utils.size_at(fx, trace_axes)
    _diagonal_axes = utils.canonicalize_axis(diagonal_axes, ndim)
    _trace_axes = utils.canonicalize_axis(trace_axes, ndim)
    def contract(x, y):
      # Jacobian leaves have the output shape followed by the parameter-leaf
      # shape, so axes at positions >= `ndim` index parameters; they are
      # contracted together with `trace_axes`.
      param_axes = list(range(x.ndim))[ndim:]
      contract_axes = _trace_axes + param_axes
      # Normalize by the size of the traced-out output axes.
      return _dot_general(x, y, contract_axes, _diagonal_axes) / size
    return tree_reduce(operator.add, tree_map(contract, j1, j2))
  def ntk_fn(
      x1: PyTree,
      x2: Optional[PyTree],
      params: PyTree,
      **apply_fn_kwargs
  ) -> np.ndarray:
    """Computes a single sample of the empirical NTK (jacobian outer product).
    Args:
      x1:
        first batch of inputs.
      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.
      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
        into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `split_kwargs`
        function which will be passed to `apply_fn`. In particular, the rng key
        in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
        same (if `x1==x2`) rng keys. See the `_read_key` function for more
        details.
    Returns:
      A single sample of the empirical NTK. The shape of the kernel is "almost"
      `zip(f(x1).shape, f(x2).shape)` except for:
      1) `trace_axes` are absent as they are contracted over.
      2) `diagonal_axes` are present only once.
      All other axes are present twice.
    """
    args1, args2, fx1, fx2, fx_axis, keys, kw_axes, x_axis = _get_args(
        f, apply_fn_kwargs, params, vmap_axes, x1, x2)
    def j_fn(x, *args):
      # Jacobian of `f` w.r.t. `params` for one (possibly single-example)
      # input `x` and its associated keyword arguments.
      _kwargs = {k: v for k, v in zip(keys, args)}
      fx = _get_f_params(f, x, x_axis, fx_axis, kw_axes, **_kwargs)
      jx = jacobian(fx)(params)
      return jx
    if not utils.all_none(x_axis) or not utils.all_none(kw_axes):
      # Inputs (or some kwargs) carry a batch axis: vmap the per-example
      # Jacobian over it instead of differentiating the whole batch at once.
      in_axes = [x_axis] + [kw_axes[k] if k in kw_axes else None for k in keys]
      j_fn = vmap(j_fn, in_axes=in_axes, out_axes=fx_axis)
    j1 = j_fn(x1, *args1)
    # `x2=None` means `x2 == x1`, so the first Jacobian can be reused.
    j2 = j_fn(x2, *args2) if not utils.all_none(x2) else j1
    ntk = tree_map(sum_and_contract, fx1, j1, j2)
    return ntk
  return ntk_fn
def _ntk_vector_products_ntk_fn(
    f: ApplyFn,
    trace_axes: Axes,
    diagonal_axes: Axes,
    vmap_axes: VMapAxes,
    **kwargs
) -> EmpiricalKernelFn:
  """Compute NTK via NTK-vector products."""
  def ntk_fn(
      x1: PyTree,
      x2: Optional[PyTree],
      params: PyTree,
      **apply_fn_kwargs
  ) -> np.ndarray:
    """Computes a single sample of the empirical NTK with NTK-vector products.
    Args:
      x1:
        first batch of inputs.
      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.
      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
        into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `split_kwargs`
        function which will be passed to `apply_fn`. In particular, the rng key
        in `apply_fn_kwargs`, will be split into two different (if `x1 != x2`)
        or same (if `x1 == x2`) rng keys. See the `_read_key` function for more
        details.
    Returns:
      A single sample of the empirical NTK. The shape of the kernel is "almost"
      `zip(f(x1).shape, f(x2).shape)` except for:
      1) `trace_axes` are absent as they are contracted over.
      2) `diagonal_axes` are present only once.
      All other axes are present twice.
    """
    args1, args2, fx1, fx2, fx_axis, keys, kw_axes, x_axis = _get_args(
        f, apply_fn_kwargs, params, vmap_axes, x1, x2)
    def get_ntk(x1, x2, *args):
      # `f1`, `f2` are `f` with inputs / kwargs bound; functions of `params`.
      f1, f2 = _get_f1_f2(f, keys, x_axis, fx_axis, kw_axes, args, x1, x2)
      def delta_vjp_jvp(delta):
        # NTK-vector product: one VJP through `f2` followed by one JVP
        # through `f1`, i.e. J1 @ (J2^T @ delta), without forming Jacobians.
        def delta_vjp(delta):
          return vjp(f2, params)[1](delta)
        return jvp(f1, (params,), delta_vjp(delta))[1]
      fx1, fx2 = eval_shape(f1, params), eval_shape(f2, params)
      # Materialize the NTK by applying the transposed linear map to a
      # standard basis of the output space, then restore the output PyTree
      # structure and shapes.
      eye = _std_basis(fx1)
      ntk = vmap(linear_transpose(delta_vjp_jvp, fx2))(eye)
      ntk = tree_map(lambda fx12: _unravel_array_into_pytree(fx1, 0, fx12), ntk)
      ntk = _diagonal(ntk, fx1)
      return ntk
    if not utils.all_none(x_axis) or not utils.all_none(kw_axes):
      x2 = x1 if utils.all_none(x2) else x2
      kw_in_axes = [kw_axes[k] if k in kw_axes else None for k in keys]
      # Doubly-vmap the per-example-pair NTK: the inner vmap batches over
      # `x1` / `args1`, the outer over `x2` / `args2`; the outer output axis
      # is shifted past the first output's dimensions.
      in_axes1 = [x_axis, None] + kw_in_axes + [None] * len(kw_in_axes)
      in_axes2 = [None, x_axis] + [None] * len(kw_in_axes) + kw_in_axes
      get_ntk = vmap(vmap(get_ntk,
                          in_axes1,
                          fx_axis),
                     in_axes2,
                     _add(fx_axis, _ndim(fx1)))
    ntk = get_ntk(x1, x2, *args1, *args2)
    # Apply the `trace_axes` / `diagonal_axes` reductions to each kernel leaf.
    ntk = tree_map(lambda x: _trace_and_diagonal(x, trace_axes, diagonal_axes),
                   ntk)
    return ntk
  return ntk_fn
def _structured_derivatives_ntk_fn(
    f: ApplyFn,
    trace_axes: Axes,
    diagonal_axes: Axes,
    vmap_axes: VMapAxes,
    _j_rules: bool,
    _s_rules: bool,
    _fwd: Optional[bool]
) -> EmpiricalKernelFn:
  """Compute NTK by using structured derivatives.

  The NTK is assembled as a sum over primitives of
  `df/dy_1 . dy/dw_1 . dy/dw_2^T . df/dy_2^T` contractions, where `y` are
  intermediary primitive outputs and `w` are parameters. Structured `dy/dw`
  Jacobians let the contraction exploit their sparsity/structure.
  """

  def sum_and_contract(
      fx1: np.ndarray,
      fx2: np.ndarray,
      fx_axis,
      df_dys_1: List[Union[np.ndarray, Zero]],
      df_dys_2: List[Union[np.ndarray, Zero]],
      dy_dws_1: List[Tuple[np.ndarray, rules.Structure]],
      dy_dws_2: List[Tuple[np.ndarray, rules.Structure]],
      dtype: np.dtype
  ):
    # Contract `df/dy` cotangents with `dy/dw` Jacobians for one output leaf.
    ndim = fx1.ndim
    size = utils.size_at(fx1, trace_axes)

    _diagonal_axes = utils.canonicalize_axis(diagonal_axes, ndim)
    _trace_axes = utils.canonicalize_axis(trace_axes, ndim)

    def contract(df_dys_1, df_dys_2, dy_dws_1, dy_dws_2):
      ntk = np.zeros((), dtype=dtype)

      # Sum contributions over all pairs of primitives (one from each side).
      for df_dy_1, dy_dw_1_ in zip(df_dys_1, dy_dws_1):
        for df_dy_2, dy_dw_2_ in zip(df_dys_2, dy_dws_2):
          dy_dw_1: np.ndarray
          s1: rules.Structure
          dy_dw_1, s1 = dy_dw_1_

          dy_dw_2: np.ndarray
          s2: rules.Structure
          dy_dw_2, s2 = dy_dw_2_

          # Symbolically-zero Jacobians contribute nothing.
          if isinstance(dy_dw_1, Zero) or isinstance(dy_dw_2, Zero):
            continue

          # Integer einsum axis ids for the `df_dy_1 . df_dy_2` part.
          df_dy_dims_1, df_dy_dims_2, out_dims = _get_dims(df_dy_1,
                                                           df_dy_2,
                                                           ndim,
                                                           _trace_axes,
                                                           _diagonal_axes)

          if len(s1.out_trace) != len(s2.out_trace):
            raise NotImplementedError('Different number of trace_axes 1/2.')

          # Tie together structured output-trace axes of both sides.
          for i, (id_1, id_2) in enumerate(zip(s1.out_trace, s2.out_trace)):
            axis_id = df_dy_1.ndim + df_dy_2.ndim + i
            y_axis_1 = id_1 % (df_dy_1.ndim - ndim)
            y_axis_2 = id_2 % (df_dy_2.ndim - ndim)
            df_dy_dims_1[ndim + y_axis_1] = axis_id
            df_dy_dims_2[ndim + y_axis_2] = axis_id

          # Fresh (negative) axis ids for the `dy/dw` factors.
          dy_dw_dims_1 = list(range(-dy_dw_1.ndim, 0))
          dy_dw_dims_2 = list(range(-dy_dw_2.ndim, 0))

          if fx_axis is not None:
            # Leading vmapped output axis: align it with `fx_axis` and share
            # its einsum id with the leading `dy/dw` axis.
            df_dy_1 = np.moveaxis(df_dy_1, 0, fx_axis)
            df_dy_2 = np.moveaxis(df_dy_2, 0, fx_axis)
            dy_dw_dims_1[0] = df_dy_dims_1[fx_axis]
            dy_dw_dims_2[0] = df_dy_dims_2[fx_axis]
            ix_1, ix_2 = 1, 1
          else:
            ix_1, ix_2 = 0, 0

          if len(s1.out_diagonal) != len(s2.out_diagonal):
            raise NotImplementedError('Different number of diagonal_axes 1/2.')

          # Tie together structured output-diagonal axes across all factors.
          for i, (id_1, id_2) in enumerate(zip(s1.out_diagonal,
                                               s2.out_diagonal)):
            # TODO(romann): compute based on array dimensions.
            axis_shift = -100_000  # Huge axis shift to ensure unique axis ids.
            axis_id = (-axis_shift - df_dy_1.ndim - df_dy_2.ndim - dy_dw_1.ndim
                       - dy_dw_2.ndim - i)
            df_dy_dims_1[ndim + id_1] = axis_id
            dy_dw_dims_1[ix_1 + id_1] = axis_id
            df_dy_dims_2[ndim + id_2] = axis_id
            dy_dw_dims_2[ix_2 + id_2] = axis_id

          # Remaining (unstructured) `y` axes: contract each `df/dy` with its
          # own `dy/dw` by sharing axis ids.
          for i in range(ndim, df_dy_1.ndim):
            if i - ndim not in (s1.out_trace +
                                s1.out_diagonal +
                                s1.out_broadcast):
              dy_dw_dims_1[ix_1] = df_dy_dims_1[i]
              ix_1 += 1

          for i in range(ndim, df_dy_2.ndim):
            if i - ndim not in (s2.out_trace +
                                s2.out_diagonal +
                                s2.out_broadcast):
              dy_dw_dims_2[ix_2] = df_dy_dims_2[i]
              ix_2 += 1

          # Guard against einsum silently broadcasting mismatched sizes.
          _check_einsum_no_broadcast(
              arrays=[df_dy_1, dy_dw_1, dy_dw_2, df_dy_2],
              dims=[df_dy_dims_1, dy_dw_dims_1, dy_dw_dims_2, df_dy_dims_2]
          )

          ntk_l = np.einsum(
              df_dy_1, df_dy_dims_1,
              dy_dw_1, dy_dw_dims_1,
              dy_dw_2, dy_dw_dims_2,
              df_dy_2, df_dy_dims_2,
              out_dims
          )
          ntk += ntk_l

      return ntk

    # Sum contributions of all parameter leaves; a leaf here is a list of
    # per-primitive arrays (possibly empty).
    ntk = tree_reduce(
        operator.add,
        tree_map(
            contract,
            df_dys_1, df_dys_2, dy_dws_1, dy_dws_2,
            is_leaf=
            lambda x: (x == [] or
                       (isinstance(x, list) and isinstance(x[0], np.ndarray)))),
        np.zeros((), dtype)
    )

    # Normalize by the total size of the traced-over axes.
    ntk /= size
    ntk_shape = _ntk_shape(fx1.shape, fx2.shape, trace_axes, diagonal_axes)
    ntk = np.broadcast_to(ntk, ntk_shape)  # if ntk is 0.
    return ntk

  def ntk_fn(
      x1: PyTree,
      x2: Optional[PyTree],
      params: PyTree,
      **apply_fn_kwargs
  ) -> np.ndarray:
    """Computes a single sample of the structured derivatives NTK.

    Args:
      x1:
        first batch of inputs.

      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.

      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.

      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
        into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `split_kwargs`
        function which will be passed to `apply_fn`. In particular, the rng key
        in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
        same (if `x1==x2`) rng keys. See the `_read_key` function for more
        details.

    Returns:
      A single sample of the empirical NTK. The shape of the kernel is "almost"
      `zip(f(x1).shape, f(x2).shape)` except for:
      1) `trace_axes` are absent as they are contracted over.
      2) `diagonal_axes` are present only once.
      All other axes are present twice.
    """
    args1, args2, fx1, fx2, fx_axis, keys, kw_axes, x_axis = _get_args(
        f, apply_fn_kwargs, params, vmap_axes, x1, x2)

    def j_fn(x, *args):
      # Compute `df/dy` and `dy/dw` for a single input (slice) `x`.
      _kwargs = {k: v for k, v in zip(keys, args)}
      fx = _get_f_params(f, x, x_axis, fx_axis, kw_axes, **_kwargs)
      df_dys, dy_dws = _get_df_dys_and_dy_dws(fn=fx, params=params,
                                              _j_rules=_j_rules,
                                              _s_rules=_s_rules, _fwd=_fwd)
      return df_dys, dy_dws

    if not utils.all_none(x_axis) or not utils.all_none(kw_axes):
      # Evaluate per-example derivatives in parallel over the batch axes.
      in_axes = [x_axis] + [kw_axes[k] if k in kw_axes else None for k in keys]
      j_fn = vmap(j_fn, in_axes=in_axes, out_axes=0)

    df_dys_1, dy_dws_1 = j_fn(x1, *args1)
    df_dys_2, dy_dws_2 = j_fn(x2, *args2) if not utils.all_none(x2) else (
        df_dys_1, dy_dws_1)

    fx_axis, dtype = _get_fx_axis_and_dtype(fx1, fx_axis, params)
    ntk = tree_map(
        functools.partial(
            sum_and_contract,
            dy_dws_1=dy_dws_1,
            dy_dws_2=dy_dws_2,
            dtype=dtype),
        fx1,
        fx2,
        fx_axis,
        df_dys_1,
        df_dys_2,
    )
    return ntk

  return ntk_fn
# Dispatch table mapping each `NtkImplementation` to the factory producing the
# respective empirical-NTK function. Used by `empirical_ntk_fn` below.
_implementation_to_ntk_fn = {
    NtkImplementation.AUTO: _empirical_auto_ntk_fn,
    NtkImplementation.JACOBIAN_CONTRACTION: _jacobian_contraction_ntk_fn,
    NtkImplementation.NTK_VECTOR_PRODUCTS: _ntk_vector_products_ntk_fn,
    NtkImplementation.STRUCTURED_DERIVATIVES: _structured_derivatives_ntk_fn,
}
def empirical_ntk_fn(
    f: ApplyFn,
    trace_axes: Axes = (-1,),
    diagonal_axes: Axes = (),
    vmap_axes: VMapAxes = None,
    implementation: Union[NtkImplementation, int] = DEFAULT_NTK_IMPLEMENTATION,
    _j_rules: bool = _DEFAULT_NTK_J_RULES,
    _s_rules: bool = _DEFAULT_NTK_S_RULES,
    _fwd: Optional[bool] = _DEFAULT_NTK_FWD,
) -> EmpiricalKernelFn:
  r"""Returns a function to draw a single sample of the NTK of a given network `f`.

  The Neural Tangent Kernel is defined as :math:`J(X_1) J(X_2)^T` where
  :math:`J` is the Jacobian :math:`df/dparams` of shape
  `full_output_shape + params.shape`.

  For best performance:
  1) pass `x2=None` if `x1 == x2`;
  2) prefer square batches (i.e `x1.shape == x2.shape`);
  3) make sure to set `vmap_axes` correctly.
  4) try different `implementation` values.

  .. warning::
    Resulting kernel shape is *nearly* `zip(f(x1).shape, f(x2).shape)`
    subject to `trace_axes` and `diagonal_axes` parameters, which make certain
    assumptions about the outputs `f(x)` that may only be true in the infinite
    width / infinite number of samples limit, or may not apply to your
    architecture. For most precise results in the context of linearized training
    dynamics of a specific finite-width network, set both `trace_axes=()` and
    `diagonal_axes=()` to obtain the kernel exactly of shape
    `zip(f(x1).shape, f(x2).shape)`.

  For networks with multiple (i.e. lists, tuples, PyTrees) outputs, in principle
  the empirical kernels will have terms measuring the covariance between the
  outputs. Here, we ignore these cross-terms and consider each output
  separately. Please raise an issue if this feature is important to you.

  Args:
    f:
      the function whose NTK we are computing. It should have the signature
      `f(params, x, **kwargs)` where `params` is a `PyTree`, `x` is a `PyTree`,
      and `f` should also return a `PyTree`.

    trace_axes:
      output axes to trace the output kernel over, i.e. compute only the trace
      of the covariance along the respective pair of axes (one pair for each
      axis in `trace_axes`). This allows to save space and compute if you are
      only interested in the respective trace, but also improve approximation
      accuracy if you know that covariance along these pairs of axes converges
      to a `constant * identity matrix` in the limit of interest (e.g.
      infinite width or infinite `n_samples`). A common use case is the channel
      / feature / logit axis, since activation slices along such axis are i.i.d.
      and the respective covariance along the respective pair of axes indeed
      converges to a constant-diagonal matrix in the infinite width or infinite
      `n_samples` limit.
      Also related to "contracting dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)

    diagonal_axes:
      output axes to diagonalize the output kernel over, i.e. compute only the
      diagonal of the covariance along the respective pair of axes (one pair for
      each axis in `diagonal_axes`). This allows to save space and compute, if
      off-diagonal values along these axes are not needed, but also improve
      approximation accuracy if their limiting value is known theoretically,
      e.g. if they vanish in the limit of interest (e.g. infinite
      width or infinite `n_samples`). If you further know that on-diagonal
      values converge to the same constant in your limit of interest, you should
      specify these axes in `trace_axes` instead, to save even more compute and
      gain even more accuracy. A common use case is computing the variance
      (instead of covariance) along certain axes.
      Also related to "batch dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)

    vmap_axes:
      A triple of `(in_axes, out_axes, kwargs_axes)`
      passed to `vmap` to evaluate the empirical NTK in parallel over these
      axes.
      Precisely, providing this argument implies that `f(params, x, **kwargs)`
      equals to a concatenation along `out_axes` of `f` applied to slices of
      `x` and `**kwargs` along `in_axes` and `kwargs_axes`. In other words, it
      certifies that `f` can be evaluated as a `vmap` with `out_axes=out_axes`
      over `x` (along `in_axes`) and those arguments in `**kwargs` that are
      present in `kwargs_axes.keys()` (along `kwargs_axes.values()`).

      For example if `_, f, _ = nt.stax.Aggregate()`, `f` is called via
      `f(params, x, pattern=pattern)`. By default, inputs `x`, patterns
      `pattern`, and outputs of `f` are all batched along the leading `0`
      dimension, and each output `f(params, x, pattern=pattern)[i]` only
      depends on the inputs `x[i]` and `pattern[i]`. In this case, we can
      pass `vmap_axes=(0, 0, dict(pattern=0))` to specify along which dimensions
      inputs, outputs, and keyword arguments are batched respectively.

      This allows us to evaluate Jacobians much more
      efficiently. If `vmap_axes` is not a triple, it is interpreted as
      `in_axes = out_axes = vmap_axes, kwargs_axes = {}`. For example a very
      common use case is `vmap_axes=0` for a neural network with leading (`0`)
      batch dimension, both for inputs and outputs, and no interactions between
      different elements of the batch (e.g. no BatchNorm, and, in the case of
      `nt.stax`, also no Dropout). However, if there is interaction between
      batch elements or no concept of a batch axis at all, `vmap_axes` must be
      set to `None`, to avoid wrong (and potentially silent) results.

    implementation:
      An :class:`NtkImplementation` value (or an :class:`int` `0`, `1`, `2`,
      or `3`). See the :class:`NtkImplementation` docstring for details.

    _j_rules:
      Internal debugging parameter, applicable only when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      custom Jacobian rules for intermediary primitive `dy/dw` computations for
      MJJMPs (matrix-Jacobian-Jacobian-matrix products). Set to `False` to use
      JVPs or VJPs, via JAX's :obj:`jax.jacfwd` or :obj:`jax.jacrev`. Custom
      Jacobian rules (`True`) are expected to be not worse, and sometimes better
      than automated alternatives, but in case of a suboptimal implementation
      setting it to `False` could improve performance.

    _s_rules:
      Internal debugging parameter, applicable only when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      efficient MJJMP rules for structured `dy/dw` primitive Jacobians. In
      practice should be set to `True`, and setting it to `False` can lead to
      dramatic deterioration of performance.

    _fwd:
      Internal debugging parameter, applicable only when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      :obj:`jax.jvp` in intermediary primitive Jacobian `dy/dw` computations,
      `False` to always use :obj:`jax.vjp`. `None` to decide automatically
      based on input/output sizes. Applicable when `_j_rules=False`, or when a
      primitive does not have a Jacobian rule. Should be set to `None` for best
      performance.

  Returns:
    A function `ntk_fn` that computes the empirical ntk.
  """
  # Delegate to the factory registered for the requested implementation.
  return _implementation_to_ntk_fn[implementation](
      f=f,
      trace_axes=trace_axes,
      diagonal_axes=diagonal_axes,
      vmap_axes=vmap_axes,
      _j_rules=_j_rules,
      _s_rules=_s_rules,
      _fwd=_fwd
  )
# JOINT NNGP/NTK KERNEL FUNCTION
def empirical_kernel_fn(
    f: ApplyFn,
    trace_axes: Axes = (-1,),
    diagonal_axes: Axes = (),
    vmap_axes: VMapAxes = None,
    implementation: Union[NtkImplementation, int] = DEFAULT_NTK_IMPLEMENTATION,
    _j_rules: bool = _DEFAULT_NTK_J_RULES,
    _s_rules: bool = _DEFAULT_NTK_S_RULES,
    _fwd: Optional[bool] = _DEFAULT_NTK_FWD,
) -> EmpiricalGetKernelFn:
  r"""Returns a function that computes single draws from NNGP and NT kernels.

  .. warning::
    Resulting kernel shape is *nearly* `zip(f(x1).shape, f(x2).shape)`
    subject to `trace_axes` and `diagonal_axes` parameters, which make certain
    assumptions about the outputs `f(x)` that may only be true in the infinite
    width / infinite number of samples limit, or may not apply to your
    architecture. For most precise results in the context of linearized training
    dynamics of a specific finite-width network, set both `trace_axes=()` and
    `diagonal_axes=()` to obtain the kernel exactly of shape
    `zip(f(x1).shape, f(x2).shape)`.

  For networks with multiple (i.e. lists, tuples, PyTrees) outputs, in principle
  the empirical kernels will have terms measuring the covariance between the
  outputs. Here, we ignore these cross-terms and consider each output
  separately. Please raise an issue if this feature is important to you.

  Args:
    f:
      the function whose kernel(s) (NNGP and/or NTK) we are computing. It
      should have the signature `f(params, x, **kwargs)` where `params` is a
      `PyTree`, `x` is a `PyTree`, and `f` should also return a `PyTree`.

    trace_axes:
      output axes to trace the output kernel over, i.e. compute only the trace
      of the covariance along the respective pair of axes (one pair for each
      axis in `trace_axes`). This allows to save space and compute if you are
      only interested in the respective trace, but also improve approximation
      accuracy if you know that covariance along these pairs of axes converges
      to a `constant * identity matrix` in the limit of interest (e.g.
      infinite width or infinite `n_samples`). A common use case is the channel
      / feature / logit axis, since activation slices along such axis are i.i.d.
      and the respective covariance along the respective pair of axes indeed
      converges to a constant-diagonal matrix in the infinite width or infinite
      `n_samples` limit.
      Also related to "contracting dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)

    diagonal_axes:
      output axes to diagonalize the output kernel over, i.e. compute only the
      diagonal of the covariance along the respective pair of axes (one pair for
      each axis in `diagonal_axes`). This allows to save space and compute, if
      off-diagonal values along these axes are not needed, but also improve
      approximation accuracy if their limiting value is known theoretically,
      e.g. if they vanish in the limit of interest (e.g. infinite
      width or infinite `n_samples`). If you further know that on-diagonal
      values converge to the same constant in your limit of interest, you should
      specify these axes in `trace_axes` instead, to save even more compute and
      gain even more accuracy. A common use case is computing the variance
      (instead of covariance) along certain axes.
      Also related to "batch dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)

    vmap_axes:
      applicable only to NTK.

      A triple of `(in_axes, out_axes, kwargs_axes)`
      passed to `vmap` to evaluate the empirical NTK in parallel over these
      axes.
      Precisely, providing this argument implies that `f(params, x, **kwargs)`
      equals to a concatenation along `out_axes` of `f` applied to slices of
      `x` and `**kwargs` along `in_axes` and `kwargs_axes`. In other words, it
      certifies that `f` can be evaluated as a `vmap` with `out_axes=out_axes`
      over `x` (along `in_axes`) and those arguments in `**kwargs` that are
      present in `kwargs_axes.keys()` (along `kwargs_axes.values()`).

      For example if `_, f, _ = nt.stax.Aggregate()`, `f` is called via
      `f(params, x, pattern=pattern)`. By default, inputs `x`, patterns
      `pattern`, and outputs of `f` are all batched along the leading `0`
      dimension, and each output `f(params, x, pattern=pattern)[i]` only
      depends on the inputs `x[i]` and `pattern[i]`. In this case, we can
      pass `vmap_axes=(0, 0, dict(pattern=0))` to specify along which dimensions
      inputs, outputs, and keyword arguments are batched respectively.

      This allows us to evaluate Jacobians much more
      efficiently. If `vmap_axes` is not a triple, it is interpreted as
      `in_axes = out_axes = vmap_axes, kwargs_axes = {}`. For example a very
      common use case is `vmap_axes=0` for a neural network with leading (`0`)
      batch dimension, both for inputs and outputs, and no interactions between
      different elements of the batch (e.g. no BatchNorm, and, in the case of
      `nt.stax`, also no Dropout). However, if there is interaction between
      batch elements or no concept of a batch axis at all, `vmap_axes` must be
      set to `None`, to avoid wrong (and potentially silent) results.

    implementation:
      Applicable only to NTK, an :class:`NtkImplementation` value (or an
      :class:`int` `0`, `1`, `2`, or `3`). See the :class:`NtkImplementation`
      docstring for details.

    _j_rules:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      custom Jacobian rules for intermediary primitive `dy/dw` computations for
      MJJMPs (matrix-Jacobian-Jacobian-matrix products). Set to `False` to use
      JVPs or VJPs, via JAX's :obj:`jax.jacfwd` or :obj:`jax.jacrev`. Custom
      Jacobian rules (`True`) are expected to be not worse, and sometimes better
      than automated alternatives, but in case of a suboptimal implementation
      setting it to `False` could improve performance.

    _s_rules:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      efficient MJJMP rules for structured `dy/dw` primitive Jacobians. In
      practice should be set to `True`, and setting it to `False` can lead to
      dramatic deterioration of performance.

    _fwd:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      :obj:`jax.jvp` in intermediary primitive Jacobian `dy/dw` computations,
      `False` to always use :obj:`jax.vjp`. `None` to decide automatically
      based on input/output sizes. Applicable when `_j_rules=False`, or when a
      primitive does not have a Jacobian rule. Should be set to `None` for best
      performance.

  Returns:
    A function to draw a single sample of the NNGP and NTK empirical kernels of
    a given network `f`.
  """
  kwargs = dict(
      f=f,
      trace_axes=trace_axes,
      diagonal_axes=diagonal_axes
  )

  # NTK-only settings are not forwarded to the NNGP kernel function.
  ntk_kwargs = dict(
      vmap_axes=vmap_axes,
      implementation=implementation,
      _j_rules=_j_rules,
      _s_rules=_s_rules,
      _fwd=_fwd,
  )

  kernel_fns = {
      'nngp': empirical_nngp_fn(**kwargs),
      'ntk': empirical_ntk_fn(**kwargs, **ntk_kwargs)
  }

  @utils.get_namedtuple('EmpiricalKernel')
  def kernel_fn(
      x1: PyTree,
      x2: Optional[PyTree],
      get: Union[None, str, Tuple[str, ...]],
      params: PyTree,
      **apply_fn_kwargs
  ) -> PyTree:
    """Computes a single sample of the empirical kernel of type `get`.

    Args:
      x1:
        first batch of inputs.

      x2:
        second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
        matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.

      get:
        type of the empirical kernel. `get=None` means `get=("nngp", "ntk")`.
        Can be a string (`"nngp"`) or a tuple of strings (`("ntk", "nngp")`).

      params:
        A `PyTree` of parameters about which we would like to compute the
        neural tangent kernel.

      **apply_fn_kwargs:
        keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
        into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `split_kwargs`
        function which will be passed to `apply_fn`. In particular, the rng key
        in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
        same (if `x1==x2`) rng keys. See the `_read_key` function for more
        details.

    Returns:
      A single sample of the empirical kernel. The shape is "almost"
      `zip(f(x1).shape, f(x2).shape)` except for:
      1) `trace_axes` are absent as they are contracted over.
      2) `diagonal_axes` are present only once.
      All other axes are present twice.

      If `get` is a string, returns the requested `np.ndarray`. If `get` is a
      tuple, returns an `EmpiricalKernel` namedtuple containing the
      requested information.
    """
    if get is None:
      get = ('nngp', 'ntk')

    out_dict = {g: kernel_fns[g](x1, x2, params, **apply_fn_kwargs)
                for g in get}
    out_dict = _dict_of_tree_to_tree_of_dict(out_dict, get)

    return out_dict

  return kernel_fn
# NTK-VECTOR PRODUCT FUNCTION
def empirical_ntk_vp_fn(
    f: ApplyFn,
    x1: PyTree,
    x2: Optional[PyTree],
    params: PyTree,
    **apply_fn_kwargs
) -> Callable[[PyTree], PyTree]:
  """Returns an NTK-vector product function.

  The function computes NTK-vector product without instantiating the NTK, and
  has the runtime equivalent to `(N1 + N2)` forward passes through `f`, and
  memory equivalent to evaluating a vector-Jacobian product of `f`.

  For details, please see section L of "`Fast Finite Width Neural Tangent Kernel
  <https://arxiv.org/abs/2206.08720>`_".

  Example:
    >>> from jax import random
    >>> import neural_tangents as nt
    >>> from neural_tangents import stax
    >>> #
    >>> k1, k2, k3, k4 = random.split(random.PRNGKey(1), 4)
    >>> x1 = random.normal(k1, (20, 32, 32, 3))
    >>> x2 = random.normal(k2, (10, 32, 32, 3))
    >>> #
    >>> # Define a forward-pass function `f`.
    >>> init_fn, f, _ = stax.serial(
    >>>     stax.Conv(32, (3, 3)),
    >>>     stax.Relu(),
    >>>     stax.Conv(32, (3, 3)),
    >>>     stax.Relu(),
    >>>     stax.Conv(32, (3, 3)),
    >>>     stax.Flatten(),
    >>>     stax.Dense(10)
    >>> )
    >>> #
    >>> # Initialize parameters.
    >>> _, params = init_fn(k3, x1.shape)
    >>> #
    >>> # NTK-vp function. Can/should be JITted.
    >>> ntk_vp_fn = empirical_ntk_vp_fn(f, x1, x2, params)
    >>> #
    >>> # Cotangent vector
    >>> cotangents = random.normal(k4, f(params, x2).shape)
    >>> #
    >>> # NTK-vp output
    >>> ntk_vp = ntk_vp_fn(cotangents)
    >>> #
    >>> # Output has same shape as `f(params, x1)`.
    >>> assert ntk_vp.shape == f(params, x1).shape

  Args:
    f:
      forward-pass function of signature `f(params, x)`.

    x1:
      first batch of inputs.

    x2:
      second batch of inputs. `x2=None` means `x2=x1`.

    params:
      A `PyTree` of parameters about which we would like to compute the neural
      tangent kernel.

    **apply_fn_kwargs:
      keyword arguments passed to `f`. `apply_fn_kwargs` will be split into
      `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `split_kwargs` function
      which will be passed to `f`. In particular, the rng key in
      `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or same
      (if `x1==x2`) rng keys. See the `_read_key` function for more details.

  Returns:
    An NTK-vector product function accepting a `PyTree` of cotangents of shape
    and structure of `f(params, x2)`, and returning the NTK-vector product of
    shape and structure of `f(params, x1)`.
  """
  args1, args2, fx1, fx2, fx_axis, keys, kw_axes, x_axis = _get_args(
      f, apply_fn_kwargs, params, None, x1, x2)
  # `f1(params) == f(params, x1)` and `f2(params) == f(params, x2)`;
  # `_get_f1_f2` expects the per-side kwargs packed as `args1 + args2`.
  f1, f2 = _get_f1_f2(f, keys, x_axis, fx_axis, kw_axes, args1 + args2, x1, x2)

  def ntk_vp_fn(cotangents: PyTree) -> PyTree:
    """Computes a single empirical NTK-vector product.

    Args:
      cotangents:
        a `PyTree` of cotangents. Must have the same shape and tree structure
        as `f(params, x2)`.

    Returns:
      A single NTK-vector product of shape and tree structure of
      `f(params, x1)`.
    """
    # `J(x2)^T @ cotangents`, followed by `J(x1) @ (J(x2)^T @ cotangents)`.
    vjp_out = vjp(f2, params)[1](cotangents)
    jvp_out = jvp(f1, (params,), vjp_out)[1]
    return jvp_out

  return ntk_vp_fn
# INTERNAL UTILITIES
def _trace_and_diagonal(
    ntk: np.ndarray,
    trace_axes: Axes,
    diagonal_axes: Axes
) -> np.ndarray:
  """Extract traces and diagonals along respective pairs of axes from the `ntk`.

  Args:
    ntk:
      input empirical NTK of shape `(N1, X, Y, Z, ..., N2, X, Y, Z, ...)`.

    trace_axes:
      axes (among `X, Y, Z, ...`) to trace over, i.e. compute the trace along
      and remove the respective pairs of axes from the `ntk`.

    diagonal_axes:
      axes (among `X, Y, Z, ...`) to take the diagonal along, i.e. extract the
      diagonal along the respective pairs of axes from the `ntk` (and hence
      reduce the resulting `ntk` axes count by 2).

  Returns:
    An array of shape, for example, `(N1, N2, Y, Z, Z, ...)` if
    `trace_axes=(1,)` (`X` axes removed), and `diagonal_axes=(2,)` (`Y` axes
    replaced with a single `Y` axis).
  """
  if ntk.ndim % 2 == 1:
    raise ValueError('Expected an even-dimensional kernel.')

  output_ndim = ntk.ndim // 2

  trace_axes = utils.canonicalize_axis(trace_axes, output_ndim)
  diagonal_axes = utils.canonicalize_axis(diagonal_axes, output_ndim)

  n_diag, n_trace = len(diagonal_axes), len(trace_axes)
  contract_size = utils.size_at(ntk.shape[:output_ndim], trace_axes)

  # Trace each pair of axes; iterating in reverse keeps the first-copy index
  # `c` valid, while the second-copy index shifts by `i` axes already removed.
  for i, c in enumerate(reversed(trace_axes)):
    ntk = np.trace(ntk, axis1=c, axis2=output_ndim + c - i)

  for i, d in enumerate(diagonal_axes):
    # Each extracted diagonal removes two axes (appending one at the end);
    # additionally account for previously-removed trace axes preceding `d`.
    axis1 = d - i
    axis2 = output_ndim + d - 2 * i - n_trace
    for c in trace_axes:
      if c < d:
        axis1 -= 1
        axis2 -= 1
    ntk = np.diagonal(ntk, axis1=axis1, axis2=axis2)

  # Interleave the remaining paired axes into `(a1, a2, b1, b2, ...)` order,
  # then move the diagonal axes (currently trailing) to their destinations.
  ntk = utils.zip_axes(ntk, 0, ntk.ndim - n_diag)
  res_diagonal_axes = _get_res_batch_dims(trace_axes, diagonal_axes)
  ntk = np.moveaxis(ntk, range(-n_diag, 0), res_diagonal_axes)
  # Normalize by the total size of the traced-over axes.
  return ntk / contract_size
def _dict_of_tree_to_tree_of_dict(
out_dict: Dict[str, PyTree],
get: Tuple[str, ...]
) -> PyTree:
# If the elements of an output dict are tuples then change the representation
# to be a tuple of dicts instead. This occurs when the output of a network is
# a parallel layer.
return tree_map(lambda *x: dict((g, v) for g, v in zip(get, x)),
*[out_dict[g] for g in get])
def _get_f_params(
    f: Callable,
    x: 'PyTree',
    x_axis: 'PyTree',
    fx_axis: 'PyTree',
    kw_axes: 'Dict[str, PyTree]',
    **apply_fn_kwargs
) -> 'Callable[[PyTree], PyTree]':
  """Close over inputs and kwargs, returning `f` as a function of params only.

  Inputs and batched kwargs get a size-1 axis inserted at their vmapped
  position, and the matching output axis is squeezed away, so the returned
  function maps unbatched slices when used under `vmap`.
  """
  x = _expand_dims(x, x_axis)

  expanded_kwargs = {}
  for k, v in apply_fn_kwargs.items():
    expanded_kwargs[k] = _expand_dims(v, kw_axes[k]) if k in kw_axes else v

  def _f(p: 'PyTree') -> 'PyTree':
    out = f(p, x, **expanded_kwargs)
    return _squeeze(out, fx_axis)

  return _f
def _get_args(
    f: Callable,
    apply_fn_kwargs: 'Dict[str, PyTree]',
    params: 'PyTree',
    vmap_axes: 'VMapAxes',
    x1: 'PyTree',
    x2: 'PyTree'
):
  """Split kwargs per input batch and compute output shapes / vmap axes."""
  kwargs1, kwargs2 = utils.split_kwargs(apply_fn_kwargs, x1, x2)

  # Abstract evaluation only — no FLOPs spent on actual forward passes.
  fx1 = eval_shape(f, params, x1, **kwargs1)
  if utils.all_none(x2):
    fx2 = fx1
  else:
    fx2 = eval_shape(f, params, x2, **kwargs2)

  x_axis, fx_axis, kw_axes = _canonicalize_axes(vmap_axes, x1, fx1, **kwargs1)

  keys = apply_fn_kwargs.keys()
  args1 = tuple(kwargs1[k] for k in keys)
  args2 = tuple(kwargs2[k] for k in keys)
  return args1, args2, fx1, fx2, fx_axis, keys, kw_axes, x_axis
def _get_f1_f2(
    f: Callable,
    keys: 'KeysView[str]',
    x_axis: 'PyTree',
    fx_axis: 'PyTree',
    kw_axes: 'Dict[str, PyTree]',
    args: Tuple,
    x1: 'PyTree',
    x2: 'Optional[PyTree]'
) -> 'Tuple[Callable[[PyTree], PyTree], Callable[[PyTree], PyTree]]':
  """Build params-only closures of `f` over `x1` and `x2`.

  `args` packs the per-side kwarg values as `args1 + args2` (first half for
  `x1`, second half for `x2`).
  """
  half = len(args) // 2
  kwargs_1 = dict(zip(keys, args[:half]))
  kwargs_2 = dict(zip(keys, args[half:]))

  f1 = _get_f_params(f, x1, x_axis, fx_axis, kw_axes, **kwargs_1)
  if utils.all_none(x2):
    # `x2 is None` means `x2 == x1`: reuse the same closure.
    return f1, f1
  f2 = _get_f_params(f, x2, x_axis, fx_axis, kw_axes, **kwargs_2)
  return f1, f2
# Either a concrete array or an abstract `ShapedArray` (shape/dtype only).
_ArrayOrShape = TypeVar('_ArrayOrShape', np.ndarray, ShapedArray)
def _check_einsum_no_broadcast(arrays: List[np.ndarray], dims: List[List[int]]):
"""Check that all matching einsum contracting axis sizes are equal.
Einsum allows silent broadcasting, and this function helps ensure it doesn't
happen.
"""
for idx_1, (a1, dims_1) in enumerate(zip(arrays, dims)):
if len(set(dims_1)) != len(dims_1):
raise ValueError(f'Dimensions {idx_1} contain duplicate axes: '
f'{dims_1}.')
for ax_1, dim_1 in enumerate(dims_1):
sz_idx_1 = a1.shape[ax_1]
for idx_2, (a2, dims_2) in enumerate(zip(arrays, dims)):
if dim_1 in dims_2:
ax_2 = dims_2.index(dim_1)
sz_idx_2 = a2.shape[ax_2]
if sz_idx_2 != sz_idx_1:
raise ValueError(f'Arrays {idx_1} and {idx_2} mismatch '
f'sizes at {ax_1} and {ax_2}: '
f'{sz_idx_1} != {sz_idx_2}')
def _expand_dims_array(x: _ArrayOrShape, axis: int) -> _ArrayOrShape:
def expand(x: np.ndarray) -> np.ndarray:
return np.expand_dims(x, axis)
if isinstance(x, ShapedArray):
return eval_shape(expand, x)
if isinstance(x, np.ndarray):
return expand(x)
raise TypeError(type(x), x)
def _expand_dims(
    x: 'Union[Optional[PyTree], UndefinedPrimal]',
    axis: 'Optional[PyTree]'
) -> 'Optional[PyTree]':
  """Expand each leaf of `x` along the matching leaf of `axis`.

  Passes through `None` (either argument) and transpose-time
  `UndefinedPrimal` placeholders untouched.
  """
  no_op = axis is None or x is None or isinstance(x, UndefinedPrimal)
  if no_op:
    return x
  return tree_map(_expand_dims_array, x, axis)
def _add(x: Optional[PyTree], y: Optional[PyTree]) -> Optional[PyTree]:
if x is None or y is None:
return None
return tree_map(operator.add, x, y)
def _sub(x: PyTree, y: PyTree) -> PyTree:
return tree_map(operator.sub, x, y)
def _div(x: PyTree, y: int) -> PyTree:
return tree_map(lambda x: x / y, x)
def _squeeze(x: PyTree, axis: Optional[PyTree]) -> PyTree:
  """Squeeze each leaf of `x` along the matching leaf of `axis`; no-op if `axis` is `None`."""
  if axis is None:
    return x

  def squeeze(
      x: np.ndarray,
      axis: Union[None, int, Tuple[int, ...]]
  ) -> np.ndarray:
    """`np.squeeze` analog working with 0-sized axes."""
    if isinstance(x, int):
      axis = (axis,)

    non_zero_axes = tuple()
    shift = 0

    for a in sorted(axis):
      if x.shape[a - shift] == 0:
        # Zero-sized axis: `np.squeeze` can't remove it, so drop it manually.
        new_shape = x.shape[:a] + x.shape[a + 1:]
        if utils.size_at(new_shape) == 0:
          # Result is still empty: a cheap metadata-only reshape suffices.
          x = x.reshape(new_shape)
        else:
          # Removing the zero axis makes the array non-empty: fill with zeros.
          x = np.zeros(new_shape, x.dtype)
        shift += 1
      else:
        # Defer non-zero axes to a single `np.squeeze` call below, adjusting
        # positions for the zero-sized axes already removed.
        non_zero_axes += (a - shift,)

    return np.squeeze(x, non_zero_axes)

  return tree_map(squeeze, x, axis)
def _ndim(x: PyTree) -> PyTree:
return tree_map(lambda x: x.ndim, x)
def _mod(
x: Optional[PyTree],
y: PyTree
) -> PyTree:
if x is None:
return None
return tree_map(operator.mod, x, y)
def _diagonal(ntk: PyTree, fx: PyTree) -> PyTree:
ntk_flat, _ = tree_flatten(ntk)
fx_flat, fx_tree = tree_flatten(fx)
n = len(fx_flat)
diag = [ntk_flat[i * (n + 1)] for i in range(n)]
return tree_unflatten(fx_tree, diag)
def _canonicalize_axes(
    vmap_axes: 'Optional[VMapAxes]',
    x: 'PyTree',
    fx: 'PyTree',
    **kwargs
) -> 'VMapAxisTriple':
  """Normalize `vmap_axes` into a pytree triple of non-negative axes.

  A non-triple `vmap_axes` is interpreted as
  `in_axes = out_axes = vmap_axes, kwargs_axes = {}`; bare integers are
  broadcast to all leaves, and negative axes are wrapped modulo leaf `ndim`.
  """
  if isinstance(vmap_axes, tuple) and len(vmap_axes) == 3:
    x_axis, fx_axis, kw_axes = vmap_axes
  else:
    x_axis, fx_axis, kw_axes = vmap_axes, vmap_axes, {}

  def broadcast(axis, tree):
    # A single integer applies to every leaf of the respective pytree.
    return tree_map(lambda _: axis, tree) if isinstance(axis, int) else axis

  x_axis = broadcast(x_axis, x)
  fx_axis = broadcast(fx_axis, fx)
  kw_axes = broadcast(kw_axes, kwargs)

  x_axis = _mod(x_axis, _ndim(x))
  fx_axis = _mod(fx_axis, _ndim(fx))
  kw_axes = _mod(kw_axes, {k: _ndim(kwargs[k]) for k in kw_axes})
  return x_axis, fx_axis, kw_axes
def _to_tuple_tree(x: PyTree) -> Tuple:
"""Replace all lists and dictionaries with tuples in a PyTree for hashing."""
if isinstance(x, (tuple, list)):
return tuple(_to_tuple_tree(x_i) for x_i in x)
if isinstance(x, dict):
return tuple((k, _to_tuple_tree(v)) for k, v in sorted(x.items()))
return x
def _ntk_shape(fx1_shape, fx2_shape, trace_axes: 'Axes', diagonal_axes: 'Axes'):
  """Compute the NTK shape for two output shapes and the given reductions.

  Traced axes vanish, diagonal axes appear once, all other axes appear twice
  (once per input batch).
  """
  trace_axes = utils.canonicalize_axis(trace_axes, fx1_shape)
  diagonal_axes = utils.canonicalize_axis(diagonal_axes, fx1_shape)

  shape = []
  for axis, (size_1, size_2) in enumerate(zip(fx1_shape, fx2_shape)):
    if axis in trace_axes:
      # Contracted over: absent from the output, but sizes must agree.
      assert size_1 == size_2
    elif axis in diagonal_axes:
      assert size_1 == size_2
      shape.append(size_1)
    else:
      shape.extend((size_1, size_2))
  return tuple(shape)
def _get_dims(
df_dy_1: np.ndarray,
df_dy_2: np.ndarray,
ndim: int,
trace_axes: Axes,
diagonal_axes: Axes
) -> Tuple[List[int], List[int], List[int]]:
df_dy_dims_1 = list(range(df_dy_1.ndim))
df_dy_dims_2 = list(range(df_dy_1.ndim, df_dy_1.ndim + df_dy_2.ndim))
out_dims = []
for i in range(ndim):
if i in trace_axes:
assert df_dy_1.shape[i] == df_dy_2.shape[i]
df_dy_dims_2[i] = df_dy_dims_1[i]
elif i in diagonal_axes:
assert df_dy_1.shape[i] == df_dy_2.shape[i]
df_dy_dims_2[i] = df_dy_dims_1[i]
out_dims += [df_dy_dims_1[i]]
else:
out_dims += [df_dy_dims_1[i], df_dy_dims_2[i]]
return df_dy_dims_1, df_dy_dims_2, out_dims
def _is_abstract_array(x) -> bool:
return isinstance(x, np.ndarray) or isinstance(
getattr(x, 'aval', None), core.ShapedArray)
def _vmap(f: Callable, in_axes, out_axes, squeeze_out: bool = True) -> Callable:
  """An expand-then-squeeze `vmap` for `f` expecting/returning batch dims."""

  def bump(axis):
    # `None` (unmapped) and `-1` (last axis) keep their meaning after the
    # size-1 batch axis is inserted; all other axes shift right by one.
    return axis if axis in (None, -1) else axis + 1

  in_axes_plus_1 = tree_map(bump, in_axes)

  @utils.wraps(f)
  def f_vmapped(*args):
    # Insert singleton batch axes so `f`'s batch-dim expectations hold.
    args = tree_map(_expand_dims, args, in_axes_plus_1,
                    is_leaf=_is_abstract_array)
    out = vmap(f, in_axes, out_axes)(*args)

    if squeeze_out:
      # Remove the singleton batch axes from the outputs.
      out = _squeeze(out, tree_map(bump, out_axes))
    return out

  return f_vmapped
def _get_fx_axis_and_dtype(fx, fx_axis, params: PyTree):
if fx_axis is None:
fx_axis = tree_map(lambda x: None, fx)
# Set the default type to be the least common type ancestor.
dtypes, _ = tree_flatten(tree_map(np.dtype, params))
if not dtypes:
dtype = None
else:
dtype = functools.reduce(np.promote_types, dtypes)
return fx_axis, dtype
def _unravel_dfs(dfs: PyTree, params: PyTree, y: PyTree) -> PyTree:
  """Unravel flat `df/dy` rows into `y`-shaped leaves nested per `params` leaf."""
  # Reshape each flat cotangent batch into the pytree structure of `y`.
  dfs = tree_map(functools.partial(_unravel_array_into_pytree, y, 0), dfs)
  if tree_structure(dfs).num_leaves > 0:
    # Swap nesting order: outer structure of `params`, inner structure of `y`.
    dfs = tree_transpose(tree_structure(tree_map(lambda x, y: [x] * len(y),
                                                 params,
                                                 dfs)),
                         tree_structure(y), dfs)
  if tree_structure(dfs).num_leaves == 0:
    # No parameters at all: broadcast the empty structure over the outputs `y`.
    dfs = tree_map(lambda x: dfs, y)
  return dfs
class _MODE(enum.Enum):
  """`F` - final output; `Y` - intermediary pre-activations; `W` - weights."""
  # Collect cotangents of the final output `F` w.r.t. primitive outputs `Y`.
  DF_DY = 'DF_DY'
  # Collect Jacobians of primitive outputs `Y` w.r.t. weights `W`.
  DY_DW = 'DY_DW'
def _get_df_dys_and_dy_dws(
    fn: Callable[[PyTree], PyTree],
    params: PyTree,
    _j_rules: bool,
    _s_rules: bool,
    _fwd: Optional[bool]
) -> Tuple[PyTree, PyTree]:
  """Computes primitive output cotangents (`df/dy`) and Jacobians (`dy/dw`).

  Runs two customized backward passes over `fn`: one collecting cotangents of
  the final output w.r.t. intermediary pre-activations (`_MODE.DF_DY`), and
  one collecting Jacobians of pre-activations w.r.t. weights (`_MODE.DY_DW`).
  """
  def primals_out_and_pullback(mode: _MODE) -> PyTree:
    return _get_primals_out_and_pullback(fn, mode, _j_rules, _s_rules, _fwd,
                                         params)
  primals_out, pullback_df_dy = primals_out_and_pullback(_MODE.DF_DY)
  # One pullback per scalar output coordinate, batched via `vmap` over the
  # standard basis of the outputs.
  df_dys = vmap(pullback_df_dy)(_std_basis(primals_out))
  df_dys = _unravel_dfs(df_dys[0], params, primals_out)
  _, pullback_dy_dw = primals_out_and_pullback(_MODE.DY_DW)
  dy_dws = pullback_dy_dw(primals_out)  # values of `primals_out` don't matter.
  dy_dws = dy_dws[0]
  return df_dys, dy_dws
def _get_primals_out_and_pullback(
    fn: Callable[[PyTree], PyTree],
    mode: _MODE,
    _j_rules: bool,
    _s_rules: bool,
    _fwd: Optional[bool],
    *primals_in: PyTree
) -> Tuple[PyTree, Callable]:
  """Adapted from `jax.interpreters.ad`.
  Returns outputs of `fn` and the "pullback" function, which is similar to the
  regular pullback function (computing cotangents to `primals_in` given output
  cotangents), but collects and returns other quantities.
  """
  primals_in_flat, in_tree = tree_flatten(primals_in)
  # NOTE(review): `flatten_fun_nokwargs` is not a public attribute of `jax` in
  # recent releases; this presumably relies on an alias set up at module level
  # (e.g. from `jax.api_util`) — confirm against the file's imports.
  fn_flat, out_tree = jax.flatten_fun_nokwargs(lu.wrap_init(fn), in_tree)
  # TODO(romann): handle call primitives more gracefully.
  with jax.disable_jit():
    outs = ad.linearize(fn_flat, *primals_in_flat, has_aux=False)
  # `outs` is `(primal outputs, partial values, linearized jaxpr, constants)`.
  primals_out, pvals, jaxpr, consts = outs
  primals_out = tree_unflatten(out_tree(), primals_out)
  def pullback_fn(*cts_in: PyTree):
    cts_in, _ = tree_flatten(cts_in)
    # Only pass cotangents for unknown (traced) outputs, as `jax.vjp` does.
    cts_in = tuple(ct for ct, pval in zip(cts_in, pvals) if not pval.is_known())
    dummy_args = [UndefinedPrimal(v.aval) for v in jaxpr.invars]
    cts_out = _backward_pass(jaxpr, mode=mode, consts=consts,
                             primals_in=dummy_args, cotangents_in=cts_in,
                             _j_rules=_j_rules, _s_rules=_s_rules, _fwd=_fwd)
    return tree_unflatten(in_tree, cts_out)
  return primals_out, pullback_fn
def _backward_pass(
    jaxpr: Jaxpr,
    mode: _MODE,
    consts: List[Value],
    primals_in: List[UndefinedPrimal],
    cotangents_in: Tuple[np.ndarray, ...],
    _j_rules: bool,
    _s_rules: bool,
    _fwd: Optional[bool]
) -> Union[List[List[Union[np.ndarray, Zero]]],
           List[List[Tuple[np.ndarray, rules.Structure]]]]:
  """Similar to and adapted from `jax.interpreters.ad.backward_pass`.
  Traverses the computational graph in the same order as the above, but
  collects and returns _not_ the cotangents wrt `jaxpr.invars`, but rather
  primitive output cotangents (`df/dy`) and Jacobians (`dy/dw`). Precisely:
  `mode=_MODE.DF_DY`: cotangents wrt outputs of equations where `jaxpr.invars`
  are inputs.
  `mode=_MODE.DY_DW`: Jacobians (of outputs wrt inputs that are within
  `jaxpr.invars`) of equations to which `jaxpr.invars` are inputs. Jacobians
  are accompanied by their `rules.Structure` metadata.
  The above are then efficiently contracted with each other elsewhere to
  compute the NTK.
  """
  def read_cotangent(v: Var) -> Union[np.ndarray, Zero]:
    # Consume (pop) the accumulated cotangent for `v`; symbolic `Zero` if absent.
    return ct_env.pop(v, Zero(v.aval))
  primal_env: Dict[Var, np.ndarray] = {}
  # NOTE(review): relies on a strict, list-returning `map` (e.g. JAX's
  # `safe_map`) being in scope at module level — a lazy builtin `map` would
  # silently do nothing here; confirm against the file's imports.
  map(functools.partial(_write_primal, primal_env), jaxpr.constvars, consts)
  map(functools.partial(_write_primal, primal_env), jaxpr.invars, primals_in)
  ct_env: Dict[Var, np.ndarray] = {}
  ctx = ad.source_info_util.transform_name_stack('transpose')
  with ctx:
    # Seed the cotangent environment with the output cotangents.
    map(functools.partial(_write_cotangent, 'outvars', ct_env),
        jaxpr.outvars, cotangents_in)
    # List of `df_dy`s or `dy_dw`s for each variable in `jaxpr.invars`.
    outs = [[] for _ in jaxpr.invars]
    if mode == _MODE.DY_DW:
      invar_to_structure = rules.get_structure_cache(jaxpr, _s_rules=_s_rules)
      vars_needing_cts_in = set()
    elif mode == _MODE.DF_DY:
      vars_needing_cts_in = _get_vars_needing_cts_in(jaxpr)
    else:
      raise ValueError(f'Unrecognized mode {mode}.')
    # Traverse equations in reverse (output-to-input) order.
    for eqn in jaxpr.eqns[::-1]:
      # Do regular backprop.
      cts_in, invals = _backprop_step(
          eqn=eqn,
          primal_env=primal_env,
          ct_env=ct_env,
          read_cotangent=read_cotangent,
          # Only propagate cotangents to variables that actually need them.
          do_write_cotangents=any(
              not isinstance(i, Literal) and i in vars_needing_cts_in
              for i in eqn.invars
          )
      )
      # Compute `df_dy`s or `dy_dw`s.
      for i_eqn, eq_invar in enumerate(eqn.invars):
        if eq_invar in jaxpr.invars:
          i_jaxpr = jaxpr.invars.index(eq_invar)
          inval = invals[i_eqn].aval
          if mode == _MODE.DF_DY:
            if not isinstance(cts_in, Zero):
              if eqn.primitive == lax.reshape_p:
                # Undo the reshape so `df_dy` matches the input shape.
                cts_in = cts_in.reshape(inval.shape)
              cts_in = cts_in.astype(inval.dtype)
            outs[i_jaxpr] += [cts_in]
          elif mode == _MODE.DY_DW:
            structure = rules.get_structure(
                eqn=eqn,
                invals=[v.aval for v in eqn.invars],
                idx=i_eqn,
                _s_rules=_s_rules
            )
            structure &= invar_to_structure[eq_invar]
            if eqn.primitive == lax.reshape_p:
              cts_in = ShapedArray(inval.shape, inval.dtype)
            elif hasattr(cts_in, 'aval'):
              cts_in = cts_in.aval
            # Trim structured (trace/broadcast/diagonal) axes to length 1
            # before evaluating the Jacobian — this is where the savings from
            # structure rules come from.
            trimmed_invals = _trim_invals(invals, structure)
            if not isinstance(cts_in, ShapedArray):
              raise TypeError(cts_in)
            trimmed_cts_in = _trim_cotangents(cts_in, structure)
            if _s_rules:
              eqn = _trim_eqn(eqn, i_eqn, trimmed_invals, trimmed_cts_in)
            def j_fn(invals):
              return _get_jacobian(eqn=eqn,
                                   cts_in=trimmed_cts_in,
                                   invals=invals,
                                   idx=i_eqn,
                                   _fwd=_fwd,
                                   _j_rules=_j_rules)
            # Map the Jacobian evaluation over every input/output diagonal.
            for in_d, out_d in zip(structure.in_diagonal, structure.out_diagonal):
              in_axes = [
                  None
                  if isinstance(invals[ix], UndefinedPrimal)
                  else i
                  for ix, i in enumerate(in_d)]
              j_fn = _vmap(j_fn, in_axes=(in_axes,), out_axes=out_d)
            dy_dw = j_fn(trimmed_invals)
            outs[i_jaxpr] += [(dy_dw, structure)]
          else:
            raise ValueError(f'Unrecognized mode {mode}.')
    # If output contains any of `primals_in`, this "identity" primitive is not
    # present in `jaxpr.eqns`. Below we treat this case by passing `cotangents_in`
    # as `df_dy`, and an identity matrix as `dy_dw`.
    for i_in, v_out in enumerate(jaxpr.outvars):
      for i_eqn, v in enumerate(jaxpr.invars):
        if v == v_out:
          if mode == _MODE.DF_DY:
            if v in ct_env:
              df_dy = cotangents_in[i_in]
            else:
              df_dy = v.aval
            outs[i_eqn] += [df_dy]
            # NOTE(review): only this branch `break`s out of the inner loop;
            # `DY_DW` keeps scanning — confirm this asymmetry is intended.
            break
          elif mode == _MODE.DY_DW:
            # Identity function
            structure = rules.get_id_structure(v.aval, _s_rules)
            structure &= invar_to_structure[v]
            # Identity Jacobian
            trimmed_invals = _trim_invals([UndefinedPrimal(v.aval)], structure)
            if not isinstance(v.aval, ShapedArray):
              raise TypeError(v.aval)
            trimmed_cts_in = _trim_cotangents(v.aval, structure)
            dy_dw = _get_jacobian(
                eqn=None,
                cts_in=trimmed_cts_in,
                invals=trimmed_invals,
                idx=0,
                _j_rules=_j_rules,
                _fwd=_fwd,
            )
            outs[i_eqn] += [(dy_dw, structure)]
          else:
            raise ValueError(f'Unrecognized mode {mode}.')
  return outs
def _get_vars_needing_cts_in(jaxpr: Jaxpr) -> Set[Var]:
"""Get a set of variables that need cotangents for structured derivatives.
Specifically, returns variables which are outputs of equations to which
`jaxpr.invars` are inputs. Cotangents `df/dy` to these variables are needed
elsewhere to compute the NTK.
"""
need_cts: Set[Var] = set()
def visit(vs: Set[Var]):
if len(vs) == 0:
return
next_visit = set()
for e in jaxpr.eqns:
if any(v in e.invars for v in vs):
for o in e.outvars:
if o not in need_cts:
need_cts.add(o)
next_visit.add(o)
visit(next_visit)
visit(set(jaxpr.invars))
# `invars` don't need cotangents in `STRUCTURED_DERIVATIVES` mode.
assert all(i not in need_cts for i in jaxpr.invars)
return need_cts
def _backprop_step(
    eqn: JaxprEqn,
    primal_env: Dict[Var, np.ndarray],
    ct_env: Dict[Var, np.ndarray],
    read_cotangent: Callable[[Var], Union[np.ndarray, Zero]],
    do_write_cotangents: bool = True
) -> Tuple[Union[np.ndarray, Zero],
           List[Union[np.ndarray, UndefinedPrimal]]]:
  """Adapted from `jax.interpreters.ad`.

  Performs one reverse-mode step for a single equation: reads its input
  primals and output cotangent, and (optionally) transposes the equation to
  accumulate cotangents for its inputs into `ct_env`.

  Returns:
    The (single) output cotangent of `eqn` and its input primals.
  """
  # NOTE(review): relies on a strict, list-returning `map` alias (e.g. JAX's
  # `safe_map`) — a lazy builtin `map` would break the `len(cts_in)` below;
  # confirm against the module-level imports.
  invals = map(functools.partial(_read_primal, primal_env), eqn.invars)
  cts_in = map(read_cotangent, eqn.outvars)
  if len(cts_in) == 1:
    cts_in = cts_in[0]
  else:
    raise NotImplementedError(
        f'Primitives with multiple outputs are not supported. '
        f'Please file a bug at '
        f'https://github.com/google/neural-tangents/issues. '
        f'Got {len(eqn.outvars)} outputs for {eqn}, with input '
        f'cotangents {cts_in}.')
  if do_write_cotangents:
    cts_out = _eqn_vjp_fn(eqn, cts_in, *invals)
    # Normalize a symbolic all-`Zero` transpose into one `Zero` per input.
    cts_out = [Zero(v.aval) for v in eqn.invars] if cts_out is Zero else cts_out
    map(functools.partial(_write_cotangent, eqn.primitive, ct_env),
        eqn.invars, cts_out)
  return cts_in, invals
def _trim_cotangents(
    cts_in: ShapedArray,
    structure: rules.Structure
) -> ShapedArray:
  """Trim every structured output axis of `cts_in` down to length `1`."""
  trim_axes = (structure.out_trace +
               structure.out_broadcast +
               structure.out_diagonal)
  trimmed: ShapedArray = _trim_axis(cts_in, trim_axes)
  return trimmed
def _trim_invals(
    invals: List[Union[np.ndarray, UndefinedPrimal]],
    structure: rules.Structure,
) -> List[Union[np.ndarray, UndefinedPrimal]]:
  """Trim structured axes of the equation inputs down to length `1`.

  Mirrors `_trim_cotangents`: input axes that `structure` declares as traced,
  broadcast, or diagonal carry redundant information, so only a single slice
  is kept before evaluating the Jacobian.
  """
  trimmed_invals = list(invals)
  # Trace axes: every input participating in the trace is trimmed.
  for i in structure.in_trace_idxs:
    trimmed_invals[i] = _trim_axis(trimmed_invals[i], structure.in_trace)
  # Broadcast axes on the designated broadcast input.
  for ax in structure.in_broadcast:
    trimmed_invals[structure.in_broadcast_idx] = _trim_axis(
        trimmed_invals[structure.in_broadcast_idx], ax)
  for ax in structure.out_broadcast:
    for i in structure.out_broadcast_idxs:
      trimmed_invals[i] = _trim_axis(trimmed_invals[i], ax)
  # Diagonal axes: trim in descending order so earlier trims don't shift the
  # positions of later axes; only undefined (weight) primals are trimmed.
  for i in range(len(trimmed_invals)):
    for in_d in sorted([axis[i] for axis in structure.in_diagonal
                        if axis[i] is not None],
                       reverse=True):
      if isinstance(trimmed_invals[i], UndefinedPrimal):
        trimmed_invals[i] = _trim_axis(trimmed_invals[i], in_d)
  return trimmed_invals  # pytype: disable=bad-return-type  # jax-ndarray
def _trim_eqn(
    eqn: JaxprEqn,
    idx: int,
    trimmed_invals: List[Union[np.ndarray, UndefinedPrimal]],
    trimmed_cts_in: ShapedArray
) -> JaxprEqn:
  """Adjust `eqn.params` to be consistent with trimmed inputs/cotangents."""
  rule = rules.EQN_PARAMS_RULES.get(eqn.primitive)
  if rule is not None:
    avals = [i.aval if isinstance(i, UndefinedPrimal) else i
             for i in trimmed_invals]
    # Copy the equation parameters to modify.
    new_params = rule(
        params=dict(eqn.params),
        idx=idx,
        trimmed_invals=avals,
        trimmed_cts_in=trimmed_cts_in
    )
    eqn = eqn.replace(params=new_params)
  return eqn
def _trim_axis(
x: Union[UndefinedPrimal, ShapedArray, np.ndarray],
axis: Union[int, Tuple[int, ...]],
) -> Union[UndefinedPrimal, ShapedArray]:
"""Trim `axis` of `x` to be of length `1`. `x` is only used for shape."""
if isinstance(axis, int):
axis = (axis,)
if isinstance(x, UndefinedPrimal):
return UndefinedPrimal(_trim_axis(x.aval, axis))
if isinstance(x, (ShapedArray, np.ndarray)):
return ShapedArray([1 if i in axis else x.shape[i]
for i in range(x.ndim)], dtype=x.dtype)
raise TypeError(type(x), x)
def _eqn_jvp_fn(
    eqn: Optional[JaxprEqn],
    idx: int,
    tangents: np.ndarray,
    *invals
) -> np.ndarray:
  """Perform a JVP for `eqn`.

  Differentiates `eqn` w.r.t. its `idx`-th input only: that input gets
  `tangents`, all others get symbolic `Zero` tangents. `eqn is None` denotes
  the identity function.
  """
  if eqn is None:
    # Identity function
    return tangents
  new_tangents = []
  new_invals = []
  for i_dx, i in enumerate(invals):
    if i_dx == idx:
      # The differentiated input: primal value is irrelevant, so use zeros.
      inval = np.zeros(i.aval.shape, i.aval.dtype)
      tangent = tangents
    else:
      inval = i
      aval = i.aval if hasattr(i, 'aval') else ShapedArray(i.shape, i.dtype)
      tangent = Zero(aval)
    # Materialize abstract inputs — JVP rules need concrete arrays.
    if isinstance(inval, (UndefinedPrimal, ShapedArray)):
      inval = np.zeros(aval.shape, aval.dtype)
    new_invals.append(inval)
    new_tangents.append(tangent)
  jvp_fn = ad.primitive_jvps[eqn.primitive]
  # `[1]` selects the output tangents (index 0 holds the primal outputs).
  out = jvp_fn(new_invals, new_tangents, **eqn.params)[1]
  if isinstance(out, list) and len(out) == 1:
    return out[0]
  elif isinstance(out, jax.Array):
    return out
  raise TypeError(out, type(out))
def _eqn_vjp_fn(
    eqn: Optional[JaxprEqn],
    cts_in: np.ndarray,
    *invals
) -> Tuple[np.ndarray, ...]:
  """Perform a VJP for `eqn`. Adapted from `jax.interpreters.ad`.

  Transposes a single equation: given the output cotangent `cts_in`, returns
  one cotangent per input. `eqn is None` denotes the identity function.
  """
  if eqn is None:
    # Identity function
    return cts_in,
  # Attach source info so errors point at the user's original code.
  name_stack = (ad.source_info_util.current_name_stack() +
                eqn.source_info.name_stack)
  with ad.source_info_util.user_context(eqn.source_info.traceback,
                                        name_stack=name_stack):
    if eqn.primitive.call_primitive or eqn.primitive.map_primitive:
      # Call/map primitives carry a sub-jaxpr and use a dedicated transpose.
      cts_in_avals = [v.aval for v in eqn.outvars]
      params = dict(eqn.params)
      call_jaxpr = params.pop('call_jaxpr')
      cts_out = ad.get_primitive_transpose(eqn.primitive)(
          params, call_jaxpr, invals, cts_in, cts_in_avals, ())
    elif eqn.primitive in ad.reducing_transposes:
      cts_out = ad.reducing_transposes[eqn.primitive](
          (), (cts_in,), *invals, **eqn.params)
    else:
      cts_out = ad.get_primitive_transpose(eqn.primitive)(cts_in, *invals,
                                                          **eqn.params)
  return cts_out
def _get_jacobian(
    eqn: Optional[JaxprEqn],
    cts_in: ShapedArray,
    invals: List[Union[np.ndarray, UndefinedPrimal]],
    idx: int,
    _j_rules: bool,
    _fwd: Optional[bool],
) -> Union[np.ndarray, Zero]:
  """Get the (structured) `eqn` output Jacobian wrt `eqn.invars[idx]`.

  Uses a custom Jacobian rule when one exists and `_j_rules` is enabled;
  otherwise evaluates the Jacobian column-by-column (forward mode) or
  row-by-row (reverse mode), whichever needs fewer passes.
  """
  if eqn is None:
    primitive = None
  else:
    primitive = eqn.primitive
  inval_shape = invals[idx].aval.shape
  cts_in_shape = cts_in.shape
  # Jacobian is laid out as output axes followed by input axes.
  dy_dw_shape = cts_in_shape + inval_shape
  if primitive not in rules.JACOBIAN_RULES:
    warnings.warn(f'No Jacobian rule found for {primitive}.')
  if primitive in rules.JACOBIAN_RULES and _j_rules:
    # Custom Jacobian rule.
    invals_j = [i.aval if isinstance(i, UndefinedPrimal) else i for i in invals]
    dy_dw = rules.JACOBIAN_RULES[primitive](eqn, idx, invals_j, cts_in)
  else:
    # Vanilla Jacobian evaluation.
    if _get_fwd(_fwd, cts_in_shape, inval_shape):  # pytype: disable=wrong-arg-types  # always-use-return-annotations
      # Forward mode: one JVP per input coordinate, columns stacked last.
      out_axes = -1
      inputs = invals[idx].aval
      def jac_fn(tangents):
        return _eqn_jvp_fn(eqn, idx, tangents, *invals)
    else:
      # Reverse mode: one VJP per output coordinate, rows stacked first.
      out_axes = 0
      inputs = cts_in
      def jac_fn(cotangents):
        return _eqn_vjp_fn(eqn, cotangents, *invals)[idx]
    eye = _std_basis(inputs)
    dy_dw = vmap(jac_fn, out_axes=out_axes)(eye)
    if isinstance(dy_dw, Zero):
      dy_dw = Zero(ShapedArray(dy_dw_shape, cts_in.dtype))
    else:
      dy_dw = dy_dw.reshape(dy_dw_shape)
  dy_dw_shape_ = dy_dw.aval.shape if isinstance(dy_dw, Zero) else dy_dw.shape  # pytype:disable=attribute-error
  assert dy_dw_shape_ == dy_dw_shape, (dy_dw_shape_, dy_dw_shape)
  return dy_dw
def _write_cotangent(
    prim: core.Primitive,
    ct_env: Dict[Var, np.ndarray],
    v: Var,
    ct: Union[np.ndarray, Zero]
):
  """Adapted from `jax.interpreters.ad`.

  Accumulates cotangent `ct` for variable `v` into `ct_env` (summing with any
  cotangent already recorded). `prim` is only used for error reporting.
  """
  # `ct` must be a `Zero` *instance*, never the `Zero` class itself.
  assert ct is not Zero, (prim, v.aval)
  if ct is None or type(v) is Literal:
    return
  # Symbolic zeros contribute nothing.
  if type(ct) is Zero:
    return
  ct_env[v] = ad.add_tangents(ct_env[v], ct) if v in ct_env else ct
  if ad.config.jax_enable_checks:
    # Sanity-check that the accumulated cotangent matches the variable's aval.
    ct_aval = core.get_aval(ct_env[v])
    joined_aval = core.lattice_join(
        v.aval, ct_aval).strip_weak_type().strip_named_shape()
    assert v.aval.strip_weak_type().strip_named_shape() == joined_aval, (
        prim, v.aval, ct_aval)
def _read_primal(
    env: Dict[Var, np.ndarray],
    v: Union[Var, Literal],
) -> Union[np.ndarray, UndefinedPrimal]:
  """Look up the primal value of `v`; `UndefinedPrimal` if it has none.

  Literals return their embedded value directly. Dynamic (`DShapedArray`)
  avals get their symbolic dimensions resolved through `env` first.
  """
  if type(v) is Literal:
    return v.val
  a = v.aval
  if type(a) is core.DShapedArray:
    # Resolve dimension variables to their concrete values.
    shape = [env[d] if type(d) is core.Var else d for d in a.shape]
    a = a.update(shape=tuple(shape))
  return env.get(v, UndefinedPrimal(a))
def _write_primal(
env: Dict[Var, np.ndarray],
v: Var,
val: Union[np.ndarray, UndefinedPrimal]
):
if not ad.is_undefined_primal(val):
env[v] = val # pytype: disable=container-type-mismatch # jax-ndarray
def _get_fwd(
_fwd: Optional[bool],
cts_in_shape: Tuple[int, ...],
inval_shape: Tuple[int, ...]
) -> bool:
if _fwd is None:
out_size = onp.prod(cts_in_shape)
in_size = onp.prod(inval_shape)
_fwd = out_size > in_size
return _fwd
def _get_flops(f: Callable, optimize: bool, *a, **kw) -> float:
e = jax.jit(f).lower(*a, **kw)
if optimize:
analysis = e.compile().cost_analysis()[0]
else:
analysis = e.cost_analysis()
return analysis['flops']
def _std_basis(pytree: PyTree) -> PyTree:
  """Similar to `jax.api._std_basis` without host-side ops."""
  flat, _ = tree_flatten(pytree)
  # Total number of scalar coordinates across all leaves.
  total = sum(np.size(leaf) for leaf in flat)
  dtype = jax.dtypes.result_type(*flat)
  basis = np.eye(total, dtype=dtype)
  return _unravel_array_into_pytree(pytree, 1, basis)
def _unravel_array_into_pytree(
pytree: PyTree,
axis: int,
arr: np.ndarray
) -> PyTree:
"""Similar to `jax.api._unravel_array_into_pytree` without host-side ops."""
leaves, treedef = tree_flatten(pytree)
if arr.ndim > 0:
axis %= arr.ndim
shapes = [arr.shape[:axis] + np.shape(l) + arr.shape[axis+1:] for l in leaves]
parts = np.split(arr, onp.cumsum([np.size(l) for l in leaves[:-1]]), axis)
reshaped_parts = [np.reshape(x, shape) for x, shape in zip(parts, shapes)]
return tree_unflatten(treedef, reshaped_parts)
def _get_res_batch_dims(
contracting_dims: Iterable[int],
batch_dims: Iterable[int]
) -> List[int]:
res_batch_dims = [2 * b - i for i, b in enumerate(batch_dims)]
for i, b in enumerate(batch_dims):
for c in contracting_dims:
if b > c:
res_batch_dims[i] -= 2
return res_batch_dims
def _dot_general(
    lhs: np.ndarray,
    rhs: np.ndarray,
    contracting_dims: Axes,
    batch_dims: Axes,
    precision=None
) -> np.ndarray:
  """`jax.lax.dot_general` with preserved dims order and shared lhs / rhs dims.
  Precisely, returns `jax.lax.dot_general(lhs, rhs, dimension_numbers)` where
  `dimension_numbers == ((contracting_dims, contracting_dims),
                         (batch_dims, batch_dims))`,
  but preserves the dimension order in the output. See XLA's
  `DotGeneral<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`.
  Args:
    lhs: array.
    rhs: array, must have the same dimensionality as `lhs`.
    contracting_dims: contracting dimensions.
    batch_dims: batch dimensions.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.
  Returns:
    Dot product result with preserved dimension order.
  """
  if lhs.ndim != rhs.ndim:
    raise ValueError(f'`lhs` and `rhs` must have the same dimensionality, got'
                     f'`lhs.ndim == {lhs.ndim}` and `rhs.ndim == {rhs.ndim}`.')
  contracting_dims = utils.canonicalize_axis(contracting_dims, lhs)
  batch_dims = utils.canonicalize_axis(batch_dims, lhs)
  n_batch_dims = len(batch_dims)
  # XLA places batch dims first in the output; remember where they land.
  leading_batch_dims = range(n_batch_dims)
  dimension_numbers = ((contracting_dims, contracting_dims),
                       (batch_dims, batch_dims))
  prod = lax.dot_general(lhs, rhs, dimension_numbers, precision)
  # Interleave lhs/rhs output dims pairwise, then move batch dims back to
  # their original (input-order) positions.
  prod = utils.zip_axes(prod, n_batch_dims)
  res_batch_dims = _get_res_batch_dims(contracting_dims, batch_dims)
  prod = np.moveaxis(prod, leading_batch_dims, res_batch_dims)
  return prod
| 85,801 | 35.325995 | 117 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/batching.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch kernel computations serially or in parallel.
This module contains a decorator `batch` that can be applied to any `kernel_fn`
of signature `kernel_fn(x1, x2, *args, **kwargs)`. The decorated function
performs the same computation by batching over `x1` and `x2` and concatenating
the result, allowing to both use multiple accelerators and stay within memory
limits.
Note that you typically should not apply the :obj:`jax.jit` decorator to the
resulting `batched_kernel_fn`, as its purpose is explicitly serial execution in
order to save memory. Further, you do not need to apply :obj:`jax.jit` to the
input `kernel_fn` function, as it is JITted internally.
Example:
>>> from jax import numpy as np
>>> import neural_tangents as nt
>>> from neural_tangents import stax
>>> #
>>> # Define some kernel function.
>>> _, _, kernel_fn = stax.serial(stax.Dense(1), stax.Relu(), stax.Dense(1))
>>> #
>>> # Compute the kernel in batches, in parallel.
>>> kernel_fn_batched = nt.batch(kernel_fn, device_count=-1, batch_size=5)
>>> #
>>> # Generate dummy input data.
>>> x1, x2 = np.ones((40, 10)), np.ones((80, 10))
>>> kernel_fn_batched(x1, x2) == kernel_fn(x1, x2) # True!
"""
from typing import Callable, Tuple, Union, Dict, Any, TypeVar, Iterable, Optional
from functools import partial
import warnings
import jax
from jax import device_put, devices
from jax import jit
from jax import pmap
from jax import random
import jax.numpy as np
from jax.tree_util import tree_all
from jax.tree_util import tree_map
from jax.tree_util import tree_flatten, tree_unflatten
from .utils.kernel import Kernel
from .utils import utils
from .utils.typing import KernelFn, NTTree
import numpy as onp
# A type variable to indicate that `nt.batch` and other functions here do not
# change the function signature.
_KernelFn = TypeVar('_KernelFn', bound=KernelFn)
def batch(kernel_fn: _KernelFn,
          batch_size: int = 0,
          device_count: int = -1,
          store_on_device: bool = True) -> _KernelFn:
  """Returns a function that computes a kernel in batches over all devices.

  Note that you typically should not apply the `jax.jit` decorator to the
  resulting `batched_kernel_fn`, as its purpose is explicitly serial execution
  in order to save memory. Further, you do not need to apply `jax.jit` to the
  input `kernel_fn` function, as it is JITted internally.

  Args:
    kernel_fn:
      A function that computes a kernel on two batches,
      `kernel_fn(x1, x2, *args, **kwargs)`. Here `x1` and `x2` are
      `np.ndarray`s of shapes `(n1,) + input_shape` and `(n2,) + input_shape`.
      The kernel function should return a `PyTree`.
    batch_size:
      specifies the size of each batch that gets processed per physical
      device. Because we parallelize the computation over columns it should be
      the case that `x1.shape[0]` is divisible by `device_count * batch_size`
      and `x2.shape[0]` is divisible by `batch_size`.
    device_count:
      specifies the number of physical devices to be used. If
      `device_count == -1` all devices are used. If `device_count == 0`, no
      device parallelism is used (a single default device is used).
    store_on_device:
      specifies whether the output should be kept on device or brought back to
      CPU RAM as it is computed. Defaults to `True`. Set to `False` to store
      and concatenate results using CPU RAM, allowing to compute larger
      kernels.

  Returns:
    A new function with the same signature as `kernel_fn` that computes the
    kernel by batching over the dataset in parallel with the specified
    `batch_size` using `device_count` devices.
  """
  # TODO(romann): find a way to avoid reading requirements.
  input_req = getattr(kernel_fn, 'input_req', {})
  dropout_in_analytic_kernel = input_req.get('use_dropout', False)

  use_parallel = (device_count > 0 or
                  (device_count == -1 and jax.local_device_count() > 1))
  use_serial = bool(batch_size)

  if use_parallel:
    kernel_fn = _parallel(kernel_fn, use_serial, dropout_in_analytic_kernel,
                          device_count)
  else:
    kernel_fn = _jit_or_pmap_broadcast(kernel_fn, 0)

  if use_serial:
    return _serial(kernel_fn, batch_size, store_on_device)
  return kernel_fn
# INTERNAL UTILITIES


# Type variables for `_scan` below: the loop carry, the per-step input slice,
# and the per-step output.
_Carry = TypeVar('_Carry')
_Input = TypeVar('_Input')
_Output = TypeVar('_Output')
def _scan(f: Callable[[_Carry, _Input], Tuple[_Carry, _Output]],
init: _Carry,
xs: Iterable[_Input]) -> Tuple[_Carry, _Output]:
"""Implements an unrolled version of scan.
Based on :obj:`jax.lax.scan` and has a similar API.
TODO(schsam): We introduce this function because lax.scan currently has a
higher peak memory usage than the unrolled version. We will aim to swap this
out for lax.scan when issue #1273 and related have been resolved.
"""
carry = init
ys = []
flat_xs, tree_def = tree_flatten(xs)
for flat_x in zip(*flat_xs):
x = tree_unflatten(tree_def, flat_x)
carry, y = f(carry, x)
ys += [y]
return carry, tree_map(lambda *y: np.stack(y), *ys)
def _flatten_batch_dimensions(k: np.ndarray,
is_parallel: bool,
discard_axis: Optional[int] = None) -> np.ndarray:
"""Takes a kernel that has been evaluated in batches and flattens."""
if discard_axis is not None:
if not is_parallel:
k = np.take(k, 0, axis=discard_axis)
return np.reshape(k, (-1,) + k.shape[2:])
if discard_axis == 1:
return np.reshape(k, (k.shape[0] * k.shape[1],) + k.shape[2:])
return k[0]
else:
if is_parallel:
return np.reshape(k, (k.shape[0] * k.shape[1],) + k.shape[2:])
k = np.transpose(k, (0, 2, 1, 3) + tuple(range(4, k.ndim)))
return np.reshape(k,
(k.shape[0] * k.shape[1],
k.shape[2] * k.shape[3]) + k.shape[4:])
@utils.nt_tree_fn(nargs=1)
def _flatten_kernel_dict(k: Dict[str, Any],
                         x2_is_none: bool,
                         is_parallel: bool) -> Dict[str, Any]:
  """Merge the batch dimensions of every batched field of a kernel dict.

  Args:
    k: dictionary of kernel fields (e.g. from `Kernel.asdict()`) whose
      array-valued entries carry leading batch (or, in parallel mode, device)
      dimensions to be flattened away.
    x2_is_none: `True` if the kernel was computed with `x2 = None`, in which
      case `cov2` / `mask2` are reset to `None`.
    is_parallel: `True` if the leading dimension is a `pmap` device axis.

  Returns:
    The same dictionary with batch dimensions flattened in every entry.
  """
  if 'nngp' in k:
    # We only use `batch_size` to compute `shape1` and `shape2` for the batch.
    # This only happens if k_dict came from a `Kernel` in which case it must
    # have 'nngp'. I do think there is a failure case if the user called
    # >>> batched_kernel_fn(x1, x2, get=('ntk', 'shape1'))
    # but I don't think this will get hit ever (and certainly before we rework
    # this code).
    batch_size = {'1': k['nngp'].shape[0], '2': k['nngp'].shape[1]}
  if 'diagonal_batch' in k and not k['diagonal_batch']:
    raise NotImplementedError('Batching not implemented for '
                              '`diagonal_batch == False`.')
  for key, value in k.items():
    if key == 'cov1':
      # `cov1` has a redundant column-batch axis (axis 1).
      k[key] = _flatten_batch_dimensions(value, is_parallel, 1)
    elif key == 'cov2':
      if x2_is_none:
        k[key] = None
      else:
        # `cov2` has a redundant row-batch axis (axis 0).
        k[key] = _flatten_batch_dimensions(value, is_parallel, 0)
    elif key == 'x1_is_x2':
      # Scalar flag broadcast over batches — take a single element.
      k[key] = value[(0,) * value.ndim]
    elif key == 'mask1':
      if value is None:
        k[key] = None
      else:
        k[key] = _flatten_batch_dimensions(value, is_parallel, 1)
    elif key == 'mask2':
      if value is None or x2_is_none:
        k[key] = None
      else:
        # Bug fix: a spurious unary `-` was applied here. Boolean masks must
        # be flattened exactly like `mask1`, and negation is invalid on
        # boolean arrays anyway.
        k[key] = _flatten_batch_dimensions(value, is_parallel, 0)
    elif key in ('shape1', 'shape2'):
      if key == 'shape2' and is_parallel:
        continue
      # Scale the batch axis of the recorded shape by the number of batches.
      batch_axis = k['batch_axis']
      shape = value
      k[key] = (shape[:batch_axis] +
                (shape[batch_axis] * batch_size[key[-1]],) +
                shape[batch_axis + 1:])
    elif isinstance(k[key], (onp.ndarray, np.ndarray)):
      # Generic batched array fields (e.g. `nngp`, `ntk`).
      k[key] = _flatten_batch_dimensions(value, is_parallel)
    else:
      # Non-array metadata is left untouched.
      pass
  return k
@utils.nt_tree_fn(nargs=1)
def _flatten_kernel(k: Kernel,
                    x2_is_none: bool,
                    is_parallel: bool) -> Kernel:
  """Flattens a kernel array or a `Kernel` along the batch dimension."""
  # Generic `namedtuple`-like containers.
  if hasattr(k, '_asdict') and hasattr(k, '_replace'):
    flat = _flatten_kernel_dict(k._asdict(), x2_is_none, is_parallel)
    return k._replace(**flat)
  # `Kernel` dataclasses.
  if isinstance(k, Kernel):
    flat = _flatten_kernel_dict(k.asdict(), x2_is_none, is_parallel)
    return Kernel(**flat)
  # Plain kernel arrays.
  if isinstance(k, (onp.ndarray, np.ndarray)):
    return _flatten_batch_dimensions(k, is_parallel)
  raise TypeError(f'Expected kernel to be either a namedtuple, `Kernel`, or '
                  f'`np.ndarray`, got {type(k)}.')
@utils.nt_tree_fn(nargs=1)
def _reshape_kernel_for_pmap(k: Kernel,
                             device_count: int,
                             n1_per_device: int) -> Kernel:
  """Adds a leading device axis to a `Kernel` for distribution via `pmap`.

  Row-batched fields (`nngp`, `ntk`, `cov1`) are split into
  `(device_count, n1_per_device, ...)`; column-side fields (`cov2`, `mask2`,
  `x1_is_x2`) are broadcast to every device.
  """
  cov2 = k.cov2
  if cov2 is None:
    # Fall back to `cov1` when `x2` was `None` so every device has a `cov2`.
    cov2 = k.cov1
  cov2 = np.broadcast_to(cov2, (device_count,) + cov2.shape)
  mask2 = k.mask2
  if mask2 is None and k.mask1 is not None:
    mask2 = k.mask1
  if mask2 is not None:
    mask2 = np.broadcast_to(mask2, (device_count,) + mask2.shape)
  x1_is_x2 = np.broadcast_to(k.x1_is_x2, (device_count,) + k.x1_is_x2.shape)
  # Split the first (row) batch axis across devices.
  nngp, ntk, cov1 = [
      np.reshape(x, (device_count, n1_per_device,) + x.shape[1:]) for x in
      (k.nngp, k.ntk, k.cov1)]
  return k.replace(
      nngp=nngp,
      ntk=ntk,
      cov1=cov1,
      cov2=cov2,
      x1_is_x2=x1_is_x2,
      shape1=(n1_per_device,) + k.shape1[1:],
      mask2=mask2)
_ArrayOrKernel = TypeVar('_ArrayOrKernel', np.ndarray, Kernel)


@utils.nt_tree_fn()
def _set_cov2_to_none(k: _ArrayOrKernel) -> _ArrayOrKernel:
  """Drop `cov2` from a `Kernel`; plain arrays pass through unchanged."""
  if not isinstance(k, Kernel):
    return k
  return k.replace(cov2=None)  # pytype: disable=attribute-error  # jax-ndarray
def _serial(kernel_fn: _KernelFn,
            batch_size: int,
            store_on_device: bool = True) -> _KernelFn:
  """Returns a function that computes a kernel in batches serially.

  This function computes the kernel over data in batches where each batch is
  processed sequentially with a given batch size. If serial detects that the
  kernel function is the result of `_parallel` (that is, if the kernel is
  distributed over multiple devices) then serial adjusts the batch size so
  that each device processes chunks of work that have batch_size x batch_size.

  The dataset size must divide the effective batch size. If parallelism is
  used this means that `|x1|` must divide `batch_size * device_count` and
  `|x2|` must divide `batch_size`.

  Args:
    kernel_fn:
      A function that computes a kernel between two datasets,
      `kernel_fn(x1, x2)` or the compositional kernel for an input kernel
      `kernel_fn(kernel_in)`. Here x1 and x2 are `np.ndarray`s of floats of
      shape `(n1,) + input_shape` and `(n2,) + input_shape`; `kernel_in` is a
      `Kernel` object. The kernel function should return a `PyTree`.
    batch_size:
      Integer specifying the size of batches in which to split the data.
    store_on_device:
      A boolean that species whether the computed kernel should be kept on
      device or brought back to CPU as it is computed. Defaults to `True`.

  Returns:
    A new function with the same signature as kernel_fn that computes the
    kernel by batching over the dataset serially with the specified
    batch_size.
  """
  device_count = max(getattr(kernel_fn, 'device_count', 1), 1)
  if not store_on_device:
    _kernel_fn = kernel_fn
    # Wrap so every batch result is moved to host (CPU) RAM as it is computed.
    @utils.wraps(_kernel_fn)
    def kernel_fn(x1, x2=None, *args, **kwargs):
      return device_put(_kernel_fn(x1, x2, *args, **kwargs), devices('cpu')[0])
  flatten = partial(_flatten_kernel, is_parallel=False)
  def serial_fn_x1(x1: NTTree[np.ndarray],
                   x2: Optional[NTTree[Optional[np.ndarray]]] = None,
                   *args,
                   **kwargs) -> NTTree[Kernel]:
    # Batched kernel computation from raw inputs `x1`, `x2`.
    x2_is_none = utils.all_none(x2)
    if x2_is_none:
      # TODO(schsam): Only compute the upper triangular part of the kernel.
      x2 = x1
    @utils.nt_tree_fn(reduce=lambda x: x[0])
    def get_n1_n2(x1, x2):
      n1, n2 = x1.shape[0], x2.shape[0]
      return n1, n2
    n1, n2 = get_n1_n2(x1, x2)
    (n1_batches, n1_batch_size, n2_batches, n2_batch_size) = (
        _get_n_batches_and_batch_sizes(n1, n2, batch_size, device_count))
    # Reshape inputs into `(n_batches, batch_size, ...)`.
    @utils.nt_tree_fn(nargs=1)
    def batch_input(x, batch_count, batch_size):
      input_shape = x.shape[1:]
      return np.reshape(x, (batch_count, batch_size,) + input_shape)
    x1s = batch_input(x1, n1_batches, n1_batch_size)
    x2s = batch_input(x2, n2_batches, n2_batch_size)
    # Split array-valued keyword arguments into per-batch slices; `rng` keys
    # are split with `jax.random` instead.
    kwargs_np1 = {}
    kwargs_np2 = {}
    kwargs_other = {}
    for k, v in kwargs.items():
      if _is_np_ndarray(v):
        if k == 'rng':
          key1, key2 = random.split(v)
          v1 = random.split(key1, n1_batches)
          v2 = random.split(key2, n2_batches)
        else:
          assert isinstance(v, tuple) and len(v) == 2
          v1 = np.reshape(v[0], (n1_batches, n1_batch_size,) + v[0].shape[1:])
          v2 = np.reshape(v[1], (n2_batches, n2_batch_size,) + v[1].shape[1:])
        kwargs_np1[k] = v1
        kwargs_np2[k] = v2
      else:
        kwargs_other[k] = v
    # Nested scans: rows over `x1` batches, columns over `x2` batches.
    def row_fn(_, x1):
      return _, _scan(col_fn, x1, (x2s, kwargs_np2))[1]
    def col_fn(x1, x2):
      x1, kwargs1 = x1
      x2, kwargs2 = x2
      kwargs_merge = {
          **kwargs_other,
          **dict((k, (kwargs1[k], kwargs2[k])) for k in kwargs1)
      }
      return (x1, kwargs1), kernel_fn(x1, x2, *args, **kwargs_merge)
    _, kernel = _scan(row_fn, 0, (x1s, kwargs_np1))
    return flatten(kernel, x2_is_none)
  def serial_fn_kernel(k: NTTree[Kernel], *args, **kwargs) -> NTTree[Kernel]:
    # Batched kernel computation from an input `Kernel` (compositional case).
    def get_n1_n2(k: NTTree[Kernel]) -> Tuple[int, ...]:
      if utils.is_list_or_tuple(k):
        # TODO(schsam): We might want to check for consistency here, but I can't
        # imagine a case where we could get inconsistent kernels.
        return get_n1_n2(k[0])
      if isinstance(k, Kernel):
        return k.nngp.shape[:2]  # pytype: disable=attribute-error
      # NOTE(review): this error reports the `Kernel` class rather than the
      # offending value `k` — possibly intended `type(k), k`; confirm.
      raise TypeError(type(Kernel), Kernel)
    n1, n2 = get_n1_n2(k)
    (n1_batches, n1_batch_size,
     n2_batches, n2_batch_size) = _get_n_batches_and_batch_sizes(n1, n2,
                                                                 batch_size,
                                                                 device_count)
    # Starting row/column indices of each batch.
    n1s = np.arange(0, n1, n1_batch_size)
    n2s = np.arange(0, n2, n2_batch_size)
    @utils.nt_tree_fn(nargs=1)
    def slice_kernel(k, n1, n2):
      return k.slice(n1, n2)
    kwargs_np1 = {}
    kwargs_np2 = {}
    kwargs_other = {}
    for key, v in kwargs.items():
      if _is_np_ndarray(v):
        assert isinstance(v, tuple) and len(v) == 2
        v1 = np.reshape(v[0], (n1_batches, n1_batch_size,) + v[0].shape[1:])
        v2 = np.reshape(v[1], (n2_batches, n2_batch_size,) + v[1].shape[1:])
        kwargs_np1[key] = v1
        kwargs_np2[key] = v2
      else:
        kwargs_other[key] = v
    def row_fn(_, n1):
      return _, _scan(col_fn, n1, (n2s, kwargs_np2))[1]
    def col_fn(n1, n2):
      # NOTE(schsam): If we end up wanting to enable jit-of-batch then we will
      # probably have to change this to dynamic slicing.
      n1, kwargs1 = n1
      n2, kwargs2 = n2
      kwargs_merge = {
          **kwargs_other,
          **dict((key, (kwargs1[key], kwargs2[key])) for key in kwargs1)
      }
      n1_slice = slice(n1, n1 + n1_batch_size)
      n2_slice = slice(n2, n2 + n2_batch_size)
      in_kernel = slice_kernel(k, n1_slice, n2_slice)
      return (n1, kwargs1), kernel_fn(in_kernel, *args, **kwargs_merge)
    # Remember whether `cov2` was absent so it can be restored after batching.
    cov2_is_none = utils.nt_tree_fn(reduce=all)(lambda k: k.cov2 is None)(k)
    _, k = _scan(row_fn, 0, (n1s, kwargs_np1))
    if cov2_is_none:
      k = _set_cov2_to_none(k)
    return flatten(k, cov2_is_none)
  @utils.wraps(kernel_fn)
  def serial_fn(x1_or_kernel: Union[NTTree[np.ndarray], NTTree[Kernel]],
                x2: Optional[NTTree[Optional[np.ndarray]]] = None,
                *args,
                **kwargs) -> NTTree[Kernel]:
    # Dispatch on whether the first argument is raw data or a `Kernel`.
    if utils.is_nt_tree_of(x1_or_kernel, (onp.ndarray, np.ndarray)):
      return serial_fn_x1(x1_or_kernel, x2, *args, **kwargs)
    elif utils.is_nt_tree_of(x1_or_kernel, Kernel):
      if x2 is not None:
        raise ValueError(f'`x2` must be `None`, got {x2}.')
      return serial_fn_kernel(x1_or_kernel, *args, **kwargs)
    else:
      raise TypeError(x1_or_kernel, type(x1_or_kernel))
  return serial_fn
def _parallel(kernel_fn: _KernelFn,
              use_serial: bool = True,
              dropout_in_analytic_kernel: bool = False,
              device_count: int = -1,
              ) -> _KernelFn:
  """Returns a function that computes a kernel in batches in parallel.

  When batching in parallel, the data is split over a set number of devices.
  The number of devices must be less than or equal to the number of physical
  devices. Moreover, the dataset size needs to divide the device count.

  Given two datasets `x1` and `x2`, parallel splits the kernel calculation over
  devices such that each device computes a batch of rows of shape
  `[|x1| / device_count, |x2|]`.

  Args:
    kernel_fn:
      A function that computes a kernel between two datasets,
      `kernel_fn(x1, x2)` or the compositional kernel for an input kernel
      `kernel_fn(kernel_in)`. Here `x1` and `x2` are `np.ndarray`s of floats of
      shape `(n1,) + input_shape` and `(n2,) + input_shape`; `kernel_in` is a
      Kernel object. The kernel function should return a `PyTree`.
    use_serial:
      Whether `serial` will be called after `_parallel`. The only use case is to
      make sure when `dropout` is used in the analytic/empirical kernel, the
      batch size in each device is square.
    dropout_in_analytic_kernel:
      whether `dropout` is used in the analytic kernel. See `use_serial` above
      for the only use case.
    device_count:
      Integer specifying the number of devices over which to split the data. If
      `device_count == 0`, the computation is parallelized over all available
      devices.

  Returns:
    A new function with the same signature as kernel_fn that computes the kernel
    by batching over the dataset in parallel over a specified number of cores.
  """
  if device_count == -1:
    device_count = jax.local_device_count()

  def _check_dropout(n1, n2, kwargs):
    # NOTE(review): `kwargs` is a dict, so `getattr(kwargs, 'rng', None)`
    # always returns the default (`'rng'` is a key, not an attribute) and this
    # flag is therefore always `False`. Looks like it should be
    # `kwargs.get('rng')` — verify intent upstream.
    dropout_in_empirical_kernel = getattr(kwargs, 'rng', None) is not None
    if n1 == n2 and (dropout_in_empirical_kernel or
                     dropout_in_analytic_kernel) and not use_serial:
      raise NotImplementedError(
          'Batching for empirical / analytic kernels with dropout'
          ' is not implemented for non-square batch size. '
          'Using `serial` (i.e. use a non-zero batch_size in the '
          '`batch` function.) could enforce square batch size in each device.')

  def _get_n_per_device(n1):
    # Split `n1` rows evenly across devices; if there are fewer rows than
    # devices, use one row per device on `ragged` devices instead.
    _device_count = device_count
    n1_per_device, ragged = divmod(n1, device_count)
    if n1_per_device and ragged:
      raise ValueError(
          ('Dataset size ({}) must divide number of '
           'physical devices ({}).').format(n1, device_count))
    elif not n1_per_device:
      _device_count = ragged
      n1_per_device = 1
    return n1_per_device, _device_count

  def parallel_fn_x1(x1, x2=None, *args, **kwargs):
    # Parallel path for raw array (tree) inputs.
    x2_is_none = utils.all_none(x2)
    if x2_is_none:
      # TODO(schsam): Only compute the upper triangular part of the kernel.
      x2 = x1

    def get_batch_size(x):
      if utils.is_list_or_tuple(x):
        return get_batch_size(x[0])
      return x.shape[0]

    n1 = get_batch_size(x1)
    n2 = n1 if x2_is_none else get_batch_size(x2)

    _check_dropout(n1, n2, kwargs)
    n1_per_device, _device_count = _get_n_per_device(n1)
    _kernel_fn = _jit_or_pmap_broadcast(kernel_fn, _device_count)

    @utils.nt_tree_fn()
    def batch_data(x):
      # Reshape `(n1, ...)` into `(devices, rows_per_device, ...)` for `pmap`.
      input_shape = x.shape[1:]
      return np.reshape(x, (_device_count, n1_per_device,) + input_shape)

    # Two-element tuple ndarray kwargs: split the first (row) element per
    # device; the second (column) element is broadcast by `_kernel_fn`.
    for k, v in kwargs.items():
      if _is_np_ndarray(v):
        assert isinstance(v, tuple) and len(v) == 2
        v0 = np.reshape(v[0], (_device_count, n1_per_device,) + v[0].shape[1:])
        kwargs[k] = (v0, v[1])

    x1 = batch_data(x1)
    kernel = _kernel_fn(x1, x2, *args, **kwargs)
    return _flatten_kernel(kernel, x2_is_none, True)

  def parallel_fn_kernel(kernel, *args, **kwargs):
    # Parallel path for `Kernel` (tree) inputs.
    @utils.nt_tree_fn(reduce=lambda shapes: shapes[0])
    def get_batch_sizes(k):
      n1 = n2 = k.cov1.shape[0]
      if k.cov2 is not None:
        n2 = k.cov2.shape[0]
      return n1, n2

    n1, n2 = get_batch_sizes(kernel)
    _check_dropout(n1, n2, kwargs)
    n1_per_device, _device_count = _get_n_per_device(n1)
    _kernel_fn = _jit_or_pmap_broadcast(kernel_fn, _device_count)

    # Remember whether `cov2` was absent so it can be stripped again after the
    # pmapped computation.
    cov2_is_none = utils.nt_tree_fn(reduce=lambda k:
                                    all(k))(lambda k: k.cov2 is None)(kernel)
    kernel = _reshape_kernel_for_pmap(kernel, _device_count, n1_per_device)
    kernel = _kernel_fn(kernel, *args, **kwargs)
    if cov2_is_none:
      kernel = _set_cov2_to_none(kernel)
    return _flatten_kernel(kernel, cov2_is_none, True)

  @utils.wraps(kernel_fn)
  def parallel_fn(x1_or_kernel, x2=None, *args, **kwargs):
    # Dispatch on input type, mirroring `serial_fn`.
    if utils.is_nt_tree_of(x1_or_kernel, (onp.ndarray, np.ndarray)):
      return parallel_fn_x1(x1_or_kernel, x2, *args, **kwargs)
    elif utils.is_nt_tree_of(x1_or_kernel, Kernel):
      assert not x2
      return parallel_fn_kernel(x1_or_kernel, *args, **kwargs)
    raise NotImplementedError()

  # Set function attributes so that `serial` can detect whether or not it is
  # acting on a parallel function.
  parallel_fn.device_count = device_count
  return parallel_fn
def _get_n_batches_and_batch_sizes(n1: int,
n2: int,
batch_size: int,
device_count: int
) -> Tuple[int, int, int, int]:
# TODO(romann): if dropout batching works for different batch sizes, relax.
max_serial_batch_size = onp.gcd(n1, n2) // device_count
n2_batch_size = min(batch_size, max_serial_batch_size)
if n2_batch_size != batch_size:
warnings.warn(
'Batch size is reduced from requested %d to effective %d to '
'fit the dataset.' % (batch_size, n2_batch_size))
n1_batch_size = n2_batch_size * device_count
n1_batches, ragged = divmod(n1, n1_batch_size)
if ragged:
# TODO(schsam): Relax this constraint.
msg = ('Number of rows of kernel must divide batch size. Found n1 = {} '
'and batch size = {}.').format(n1, n1_batch_size)
if device_count > 1:
msg += (' Note that device parallelism was detected and so the batch '
'size was expanded by a factor of {}.'.format(device_count))
raise ValueError(msg)
n2_batches, ragged = divmod(n2, n2_batch_size)
if ragged:
# TODO(schsam): Relax this constraint.
raise ValueError(('Number of columns of kernel must divide batch '
'size. Found n2 = {} '
'and batch size = {}').format(n2, n2_batch_size))
return n1_batches, n1_batch_size, n2_batches, n2_batch_size
def _is_np_ndarray(x) -> bool:
if x is None:
return False
return tree_all(tree_map(
lambda y: isinstance(y, (onp.ndarray, np.ndarray)), x))
def _get_jit_or_pmap_broadcast():
"""Initializes a cache of pmapped functions closed over non-`np.ndarray` args.
Returns:
A `jit_or_pmap_broadcast` function allowing to jit or pmap a function as a
closure over all non-`np.ndarray` args, all `kwargs`, while broadcasting
all `np.ndarray`s in `args` except the first one.
"""
cache = {}
def jit_or_pmap_broadcast(f: Callable, device_count: int = -1) -> Callable:
"""Pmap `f` over the first argument by closing over or broadcasting others.
Args:
f:
function to pmap. First argument must be an `np.ndarray` or a Kernel.
In either case, ndarrays should have a leading axis having the size of
`device_count`.
device_count:
number of XLA devices. `-1` means all available devices. `0` means to
just `jit` the function.
Returns:
A function of the same signature as `f` pmapped over the `np.ndarray`s in
the first argument. Other arguments are either closed over
(non-`np.ndarray`s in `args` and all `kwargs`) or broadcasted to
`(device_count,) + old_shape` (for `np.ndarray`s). If `device_count == 0`,
`f` is closed over and jitted over all non-array arguments and all
`kwargs`.
Raises:
An error if `kwargs` have a `np.ndarray`.
TODO(romann): treat `np.ndarray`s in `kwargs` when JAX allows it. See
https://github.com/google/jax/issues/912
"""
key = (f, device_count)
if device_count == -1:
device_count = jax.local_device_count()
# TODO(romann): adapt this when JAX allows `axis_in` for `pmap`.
def broadcast(arg: np.ndarray) -> np.ndarray:
if device_count == 0:
return arg
return np.broadcast_to(arg, (device_count,) + arg.shape)
@utils.wraps(f)
def f_pmapped(x_or_kernel: Union[np.ndarray, Kernel], *args, **kwargs):
args_np, args_np_idxs = [], []
args_other = {}
# TODO(romann): treat `np.ndarray`s in `kwargs` when JAX allows it.
# https://github.com/google/jax/issues/912
# Filter out `np.ndarray`s from other arguments.
for i, arg in enumerate(args):
if _is_np_ndarray(arg):
args_np.append(arg)
args_np_idxs.append(i)
else:
args_other[i] = arg
kwargs_np = {}
kwargs_other = {}
for k, v in kwargs.items():
if _is_np_ndarray(v):
assert isinstance(v, tuple), len(v) == 2
kwargs_np[k] = (v[0], broadcast(v[1]))
else:
kwargs_other[k] = v
# Check cache before jitting.
_key = key + (
tuple(args_other.items()) +
tuple(kwargs_other.items()))
if _key in cache:
_f = cache[_key]
else:
# Define a `np.ndarray`-only function as a closure over other arguments.
def _f(_x_or_kernel, *_args_np, **_kwargs_np):
# Merge args.
_args_np = {i: _arg_np for i, _arg_np in zip(args_np_idxs, _args_np)}
_args = {**_args_np, **args_other}
_args = tuple(v for k, v in sorted(_args.items()))
_kwargs = {**_kwargs_np, **kwargs_other}
return f(_x_or_kernel, *_args, **_kwargs)
_f = jit(_f) if device_count == 0 else pmap(_f)
cache[_key] = _f
# Broadcast `np.ndarray` arguments and apply the new function to them.
args_np = tree_map(broadcast, args_np)
return _f(x_or_kernel, *args_np, **kwargs_np)
return f_pmapped
return jit_or_pmap_broadcast
# Module-level singleton: every batching call shares one compilation cache.
_jit_or_pmap_broadcast = _get_jit_or_pmap_broadcast()
| 27,509 | 35.102362 | 81 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/predict.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to make predictions on the train/test set using NTK/NNGP.
Most functions in this module accept training data as inputs and return a new
function `predict_fn` that computes predictions on the train set / given test
set / timesteps.
.. warning::
`trace_axes` parameter supplied to prediction functions must match the
respective parameter supplied to the function used to compute the kernel.
Namely, this is the same `trace_axes` used to compute the empirical kernel
(`utils/empirical.py`; `diagonal_axes` must be `()`), or `channel_axis` in the
output of the top layer used to compute the closed-form kernel (`stax.py`;
note that closed-form kernels currently only support a single `channel_axis`).
"""
import collections
from functools import lru_cache
from typing import Callable, Dict, Generator, Iterable, NamedTuple, Optional, Tuple, Union, Any
import jax
from jax import grad
from jax.experimental import ode
import jax.numpy as np
import jax.scipy as sp
from jax.tree_util import tree_all, tree_map
import numpy as onp
import scipy as osp
from typing_extensions import Protocol
from .utils import dataclasses, utils
from .utils.typing import Axes, Get, KernelFn
# Alias for an arbitrary JAX pytree; no structural validation is implied.
PyTree = Any

ArrayOrScalar = Union[None, int, float, np.ndarray]
"""Alias for optional arrays or scalars."""
class PredictFn(Protocol):
  """A type alias for a predictor function."""

  def __call__(
      self,
      t: Optional[ArrayOrScalar] = None,
      fx_train_0: ArrayOrScalar = 0.,
      fx_test_0: Optional[ArrayOrScalar] = None,
      k_test_train: Optional[np.ndarray] = None
  ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """Returns train [and test] set predictions at time[s] `t`."""
    ...
def gradient_descent_mse(
    k_train_train: np.ndarray,
    y_train: np.ndarray,
    learning_rate: float = 1.,
    diag_reg: float = 0.,
    diag_reg_absolute_scale: bool = False,
    trace_axes: Axes = (-1,)
) -> PredictFn:
  r"""Predicts the outcome of function space gradient descent training on MSE.

  Solves in closed form for the continuous-time version of gradient descent.

  Uses the closed-form solution for gradient descent on an MSE loss in function
  space detailed in [*,**] given a Neural Tangent or Neural Network Gaussian
  Process Kernel over the dataset. Given NNGP or NTK, this function will return
  a function that predicts the time evolution for function space points at
  arbitrary time[s] (training step[s]) `t`. Note that these time[s] (step[s])
  are continuous and are interpreted in units of the `learning_rate` so
  `absolute_time = learning_rate * t`, and the scales of `learning_rate` and `t`
  are interchangeable.

  Note that first invocation of the returned `predict_fn` will be slow and
  allocate a lot of memory for its whole lifetime, as either eigendecomposition
  (`t` is a scalar or an array) or Cholesky factorization (`t=None`) of
  `k_train_train` is performed and cached for future invocations (or both, if
  the function is called on both finite and infinite (`t=None`) times).

  [*] "`Neural Tangent Kernel: Convergence and Generalization in Neural Networks
  <https://arxiv.org/abs/1806.07572>`_"

  [**] "`Wide Neural Networks of Any Depth Evolve as Linear
  Models Under Gradient Descent <https://arxiv.org/abs/1902.06720>`_"

  Example:
    >>> import neural_tangents as nt
    >>> #
    >>> t = 1e-7
    >>> kernel_fn = nt.empirical_ntk_fn(f)
    >>> k_train_train = kernel_fn(x_train, None, params)
    >>> k_test_train = kernel_fn(x_test, x_train, params)
    >>> #
    >>> predict_fn = nt.predict.gradient_descent_mse(k_train_train, y_train)
    >>> #
    >>> fx_train_0 = f(params, x_train)
    >>> fx_test_0 = f(params, x_test)
    >>> #
    >>> fx_train_t, fx_test_t = predict_fn(t, fx_train_0, fx_test_0,
    >>>                                    k_test_train)

  Args:
    k_train_train:
      kernel on the training data. Must have the shape of
      `zip(y_train.shape, y_train.shape)` with `trace_axes` absent.
    y_train:
      targets for the training data.
    learning_rate:
      learning rate, step size.
    diag_reg:
      a scalar representing the strength of the diagonal regularization for
      `k_train_train`, i.e. computing `k_train_train + diag_reg * I` during
      Cholesky factorization or eigendecomposition.
    diag_reg_absolute_scale:
      `True` for `diag_reg` to represent regularization in absolute units,
      `False` to be `diag_reg * np.mean(np.trace(k_train_train))`.
    trace_axes:
      `f(x_train)` axes such that `k_train_train` lacks these pairs of
      dimensions and is to be interpreted as :math:`\Theta \otimes I`, i.e.
      block-diagonal along `trace_axes`. These can be specified either to
      save space and compute, or to even improve approximation accuracy of the
      infinite-width or infinite-samples limit, since in these limits the
      covariance along channel / feature / logit axes indeed converges to a
      constant-diagonal matrix. However, if you target linearized dynamics of a
      specific finite-width network, `trace_axes=()` will yield most accurate
      result.

  Returns:
    A function of signature
    `predict_fn(t, fx_train_0, fx_test_0, k_test_train)` that
    returns output train [and test] set[s] predictions at time[s] `t`.
  """
  # Kernel axis indices, as computed by `_get_axes`; `odd` / `first` are the
  # axes contracted against train-data axes below.
  _, odd, first, _ = _get_axes(k_train_train)
  trace_axes = utils.canonicalize_axis(trace_axes, y_train)
  # Re-express `trace_axes` as negative indices into `y_train`'s shape.
  trace_axes = tuple(-y_train.ndim + a for a in trace_axes)
  n_t_axes, n_non_t_axes = len(trace_axes), y_train.ndim - len(trace_axes)
  last_t_axes = tuple(range(-n_t_axes, 0))
  non_t_axes = tuple(range(-y_train.ndim, -n_t_axes))

  @lru_cache(1)
  def get_predict_fn_inf():
    # Cached so the Cholesky factorization is performed only once (see the
    # docstring note on first-invocation cost).
    with jax.core.eval_context():
      solve = _get_cho_solve(k_train_train, diag_reg, diag_reg_absolute_scale)

    def predict_fn_inf(fx_train_0, fx_test_0, k_test_train):
      # At infinite time, train predictions converge exactly to the targets.
      fx_train_t = y_train.astype(k_train_train.dtype)
      if fx_test_0 is None:
        return fx_train_t

      rhs = y_train if fx_train_0 is None else y_train - fx_train_0
      dfx_test = np.tensordot(k_test_train, solve(rhs, trace_axes),
                              (odd, first))
      dfx_test = np.moveaxis(dfx_test, last_t_axes, trace_axes)
      fx_test_t = fx_test_0 + dfx_test

      if fx_train_0 is None:
        return fx_test_t
      return fx_train_t, fx_test_t

    return predict_fn_inf

  @lru_cache(1)
  def get_predict_fn_finite():
    # Cached so the eigendecomposition is performed only once (see the
    # docstring note on first-invocation cost).
    with jax.core.eval_context():
      expm1_fn, inv_expm1_fn = _get_fns_in_eigenbasis(
          k_train_train,
          diag_reg,
          diag_reg_absolute_scale,
          (_make_expm1_fn(y_train.size),
           _make_inv_expm1_fn(y_train.size))
      )

    rhs_shape = tuple(y_train.shape[a] for a in trace_axes)

    def predict_fn_finite(t, fx_train_0, fx_test_0, k_test_train):
      # Times are interpreted in units of the learning rate (see docstring).
      t = np.array(t) * learning_rate
      t_shape, t_ndim = t.shape, t.ndim
      first_t_axes = tuple(range(t_ndim))
      t = t.reshape((-1, 1))

      rhs = -y_train if fx_train_0 is None else fx_train_0 - y_train
      rhs = np.moveaxis(rhs, trace_axes, last_t_axes).reshape(
          (-1,) + rhs_shape)
      shape = t_shape + k_train_train.shape[1::2] + rhs_shape

      if fx_train_0 is not None:
        dfx_train = expm1_fn(rhs, t).reshape(shape)
        dfx_train = np.moveaxis(dfx_train, last_t_axes, trace_axes)
        fx_train_t = np.expand_dims(fx_train_0, first_t_axes) + dfx_train

      if fx_test_0 is not None:
        dfx_test = inv_expm1_fn(rhs, t).reshape(shape)
        dfx_test = np.tensordot(k_test_train, dfx_test, (odd, non_t_axes))
        dfx_test = np.moveaxis(
            dfx_test,
            tuple(range(n_non_t_axes, n_non_t_axes + t_ndim)) + last_t_axes,
            tuple(range(t_ndim)) + trace_axes)
        fx_test_t = np.expand_dims(fx_test_0, first_t_axes) + dfx_test

      if fx_train_0 is not None and fx_test_0 is not None:
        return fx_train_t, fx_test_t
      if fx_test_0 is None:
        return fx_train_t
      return fx_test_t

    return predict_fn_finite

  def predict_fn(
      t: Optional[ArrayOrScalar] = None,
      fx_train_0: ArrayOrScalar = 0.,
      fx_test_0: Optional[ArrayOrScalar] = None,
      k_test_train: Optional[np.ndarray] = None
  ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """Return output predictions on train [and test] set[s] at time[s] `t`.

    Args:
      t:
        a scalar of array of scalars of any shape. `t=None` is treated as
        infinity and returns the same result as `t=np.inf`, but is computed
        using identity or linear solve for train and test predictions
        respectively instead of eigendecomposition, saving time and precision.
        Equivalent of training steps (but can be fractional).
      fx_train_0:
        output of the network at `t == 0` on the training set. `fx_train_0=None`
        means to not compute predictions on the training set.
      fx_test_0:
        output of the network at `t == 0` on the test set. `fx_test_0=None`
        means to not compute predictions on the test set.
      k_test_train:
        kernel relating test data with training data. Must have the shape of
        `zip(y_test.shape, y_train.shape)` with `trace_axes` absent. Pass
        `k_test_train=None` if you only need non-regularized (`diag_reg=0`)
        predictions on the training set. For regularized train-set predictions,
        pass `k_test_train=k_train_train`.

    Returns:
      `fx_train_t` or `(fx_train_t, fx_test_t)` if `fx_test_0 != None` with
      potentially additional leading time dimensions matching `t.shape`.

    Raises:
      ValueError: if `fx_test_0` is not `None`, but `k_test_train` is `None`.
    """
    _check_inputs(fx_train_0, fx_test_0, k_test_train)

    # Infinite time
    if t is None:
      return get_predict_fn_inf()(fx_train_0, fx_test_0, k_test_train)

    # Finite time
    return get_predict_fn_finite()(t, fx_train_0, fx_test_0, k_test_train)

  return predict_fn
@dataclasses.dataclass
class ODEState:
  """ODE state dataclass holding outputs and auxiliary variables.

  Attributes:
    fx_train:
      training set outputs.
    fx_test:
      test set outputs.
    qx_train:
      training set auxiliary state variable (e.g. momentum).
    qx_test:
      test set auxiliary state variable (e.g. momentum).
  """
  # All fields default to `None`, meaning "not tracked"; see
  # `gradient_descent.get_state_0` for how they are populated.
  fx_train: Optional[np.ndarray] = None
  fx_test: Optional[np.ndarray] = None
  qx_train: Optional[np.ndarray] = None
  qx_test: Optional[np.ndarray] = None
class PredictFnODE(Protocol):
  """A type alias for a predictor function operating on an `ODEState`."""

  def __call__(
      self,
      t: Optional[ArrayOrScalar] = None,
      fx_train_or_state_0: Union[ArrayOrScalar, ODEState] = 0.,
      fx_test_0: Optional[ArrayOrScalar] = None,
      k_test_train: Optional[np.ndarray] = None
  ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray], ODEState]:
    """Returns predictions (or a full `ODEState`) at time[s] `t`."""
    ...
def gradient_descent(
    loss: Callable[[np.ndarray, np.ndarray], float],
    k_train_train: np.ndarray,
    y_train: np.ndarray,
    learning_rate: float = 1.,
    momentum: Optional[float] = None,
    trace_axes: Axes = (-1,)
) -> PredictFnODE:
  r"""Predicts the outcome of function space training using gradient descent.

  Uses an ODE solver. If `momentum != None`, solves a continuous-time version of
  gradient descent with momentum.

  .. note::
    We use standard momentum as opposed to Nesterov momentum.

  Solves the function space ODE for [momentum] gradient descent with a given
  `loss` (detailed in "`Wide Neural Networks of Any Depth Evolve as Linear
  Models Under Gradient Descent <https://arxiv.org/abs/1902.06720>`_".) given a
  Neural Tangent Kernel[s] over the dataset[s] at arbitrary time[s] (step[s])
  `t`. Note that for gradient descent `absolute_time = learning_rate * t` and
  the scales of the learning rate and query step[s] `t` are interchangeable.
  However, the momentum gradient descent ODE is solved in the units of
  `learning_rate**0.5`, and therefore `absolute_time = learning_rate**0.5 * t`,
  hence the `learning_rate` and training time[s] (step[s]) `t` scales are not
  interchangeable.

  Example:
    >>> import neural_tangents as nt
    >>> #
    >>> t = 1e-7
    >>> learning_rate = 1e-2
    >>> momentum = 0.9
    >>> #
    >>> kernel_fn = nt.empirical_ntk_fn(f)
    >>> k_test_train = kernel_fn(x_test, x_train, params)
    >>> #
    >>> from jax.nn import log_softmax
    >>> cross_entropy = lambda fx, y_hat: -np.mean(log_softmax(fx) * y_hat)
    >>> predict_fn = nt.predict.gradient_descent(
    >>>     cross_entropy, k_train_train, y_train, learning_rate, momentum)
    >>> #
    >>> fx_train_0 = f(params, x_train)
    >>> fx_test_0 = f(params, x_test)
    >>> #
    >>> fx_train_t, fx_test_t = predict_fn(t, fx_train_0, fx_test_0,
    >>>                                    k_test_train)

  Args:
    loss:
      a loss function whose signature is `loss(f(x_train), y_train)`. Note:
      the loss function should treat the batch and output dimensions
      symmetrically.
    k_train_train:
      kernel on the training data. Must have the shape of
      `zip(y_train.shape, y_train.shape)` with `trace_axes` absent.
    y_train:
      targets for the training data.
    learning_rate:
      learning rate, step size.
    momentum:
      momentum scalar.
    trace_axes:
      `f(x_train)` axes such that `k_train_train` lacks these pairs of
      dimensions and is to be interpreted as :math:`\Theta \otimes I`, i.e.
      block-diagonal along `trace_axes`. These can be specified either to
      save space and compute, or to even improve approximation accuracy of the
      infinite-width or infinite-samples limit, since in these limits the
      covariance along channel / feature / logit axes indeed converges to a
      constant-diagonal matrix. However, if you target linearized dynamics of a
      specific finite-width network, `trace_axes=()` will yield most accurate
      result.

  Returns:
    A function that returns output train [and test] set[s] predictions at
    time[s] `t`.
  """
  # Kernel axis indices, as computed by `_get_axes`; `odd` are the axes
  # contracted against the train-data axes of the loss gradient below.
  _, odd, _, _ = _get_axes(k_train_train)
  trace_axes = utils.canonicalize_axis(trace_axes, y_train)
  non_t_axes = tuple(a for a in range(y_train.ndim) if a not in trace_axes)
  last_t_axes = range(-len(trace_axes), 0)

  dtype = k_train_train.dtype
  grad_loss = grad(lambda fx: loss(fx, y_train))

  if momentum is not None:
    # Reparameterize for the continuous-time momentum ODE: time is measured in
    # units of `learning_rate**0.5` (see docstring), and the discrete momentum
    # is mapped to a continuous coefficient.
    learning_rate **= 0.5
    momentum = (momentum - 1.0) / learning_rate

  def get_state_0(fx_train_or_state_0, fx_test_0, fx_test_shape):
    # Normalize the user-provided initial condition (raw outputs or a full
    # `ODEState`) into a complete `ODEState`.
    if isinstance(fx_train_or_state_0, ODEState):
      fx_train_0 = fx_train_or_state_0.fx_train
      fx_test_0 = fx_train_or_state_0.fx_test
      qx_train_0 = fx_train_or_state_0.qx_train
      qx_test_0 = fx_train_or_state_0.qx_test
    else:
      fx_train_0 = fx_train_or_state_0
      qx_train_0 = qx_test_0 = None

    if fx_train_0 is None:
      fx_train_0 = np.zeros_like(y_train, dtype)
    else:
      fx_train_0 = np.broadcast_to(fx_train_0, y_train.shape)

    if fx_test_0 is not None:
      fx_test_0 = np.broadcast_to(fx_test_0, fx_test_shape)

    if momentum is None:
      if qx_train_0 is not None or qx_test_0 is not None:
        raise ValueError('Got passed momentum state variables, while '
                         '`momentum is None`.')
    else:
      # Zero-initialize any missing momentum variables.
      qx_train_0 = (np.zeros_like(y_train, dtype) if qx_train_0 is None else
                    np.broadcast_to(qx_train_0, y_train.shape))
      qx_test_0 = (None if fx_test_0 is None else
                   (np.zeros(fx_test_shape, dtype) if qx_test_0 is None
                    else np.broadcast_to(qx_test_0, fx_test_shape)))

    return ODEState(fx_train_0, fx_test_0, qx_train_0, qx_test_0)  # pytype: disable=wrong-arg-count

  def get_dstate_dt(k_test_train):
    # Build the time-derivative of the ODE state for `ode.odeint`.
    def dstate_dt(state_t: ODEState, unused_t) -> ODEState:
      fx_train_t, fx_test_t, qx_train_t, qx_test_t = (
          state_t.fx_train, state_t.fx_test, state_t.qx_train, state_t.qx_test)

      dy_df_t = grad_loss(fx_train_t)

      # From here on, `fx_*_t` hold time-derivatives, not outputs.
      fx_train_t = -np.moveaxis(
          np.tensordot(k_train_train, dy_df_t, (odd, non_t_axes)),
          last_t_axes, trace_axes
      )
      if fx_test_t is not None:
        fx_test_t = -np.moveaxis(
            np.tensordot(k_test_train, dy_df_t, (odd, non_t_axes)),
            last_t_axes, trace_axes
        )

      if momentum is None:
        return ODEState(fx_train_t, fx_test_t)

      # Momentum dynamics: d(fx)/dt = qx, d(qx)/dt = momentum * qx + gradient
      # term, hence the swapped field order in the returned state.
      fx_train_t += momentum * qx_train_t
      if qx_test_t is not None:
        fx_test_t += momentum * qx_test_t

      return ODEState(qx_train_t, qx_test_t, fx_train_t, fx_test_t)  # pytype: disable=wrong-arg-count
    return dstate_dt

  def predict_fn(
      t: Optional[ArrayOrScalar] = None,
      fx_train_or_state_0: Union[ArrayOrScalar, ODEState] = 0.,
      fx_test_0: Optional[ArrayOrScalar] = None,
      k_test_train: Optional[np.ndarray] = None
  ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray], ODEState]:
    """Return output predictions on train [and test] set[s] at time[s] `t`.

    Args:
      t:
        a scalar or array of scalars of any shape in strictly increasing order.
        `t=None` is equivalent to `t=np.inf` and may not converge. Equivalent of
        training steps (but can be fractional).
      fx_train_or_state_0:
        either (a) output of the network at `t == 0` on the training set or (b)
        complete ODE state (`predict.ODEState`). Pass an ODE state if you want
        to operate on the full ODE state instead of output variables only
        (useful for inspecting auxiliary variables or resuming an optimizer with
        auxiliary variables from a specific state. Note that only
        `momentum != None` optimizer currently has auxiliary variables. To
        initialize an ODE state from scratch, call
        `predict.ODEState(fx_train_0, fx_test_0)`. If an ODE state is passed, an
        ODE state is returned. `fx_train_0=None` means to not compute
        predictions on the training set.
      fx_test_0:
        output of the network at `t == 0` on the test set. `fx_test_0=None`
        means to not compute predictions on the test set.
      k_test_train:
        kernel relating test data with training data. Must have the shape of
        `zip(y_test.shape, y_train.shape)` with `trace_axes` absent. Pass
        `k_test_train=None` if you only need predictions on the training set.

    Returns:
      `fx_train_t` or `(fx_train_t, fx_test_t)` if `fx_test_0 != None` with
      potentially additional leading time dimensions matching `t.shape`.
      Alternatively can return an `ODEState` at time[s] `t`.

    Raises:
      ValueError: if `fx_test_0` is not `None`, but `k_test_train` is `None`.
    """
    _check_inputs(fx_train_or_state_0, fx_test_0, k_test_train)

    t = np.array(t if t is not None else np.inf, dtype) * learning_rate
    t_shape = t.shape
    t = t.reshape((-1,))

    # ODE solver requires `t[0]` to be the time where `fx_train_0` [and
    # `fx_test_0`] are evaluated, but also a strictly increasing sequence of
    # timesteps, so we always temporarily append an [almost] `0` at the start.
    t0 = np.where(t[0] == 0,
                  np.full((1,), -1e-24, t.dtype),
                  np.zeros((1,), t.dtype))
    t = np.concatenate([t0, t])

    # Solve the ODE.
    fx_test_shape = _get_fx_test_shape(y_train, k_test_train, trace_axes)
    state_0 = get_state_0(fx_train_or_state_0, fx_test_0, fx_test_shape)
    state_t = ode.odeint(get_dstate_dt(k_test_train), state_0, t)

    # Remove the added `t0`.
    trim = lambda x: x[1:].reshape(t_shape + x.shape[1:])
    trim_tree = lambda tree: tree_map(trim, tree)
    state_t = trim_tree(state_t)

    # `ODEState` -> `ODEState`
    if isinstance(fx_train_or_state_0, ODEState):
      return state_t

    # `np.ndarray` -> `np.ndarray`
    fx_train_t, fx_test_t = state_t.fx_train, state_t.fx_test

    if fx_train_or_state_0 is not None and fx_test_0 is None:
      return fx_train_t
    if fx_test_0 is not None and fx_train_or_state_0 is None:
      return fx_test_t
    return fx_train_t, fx_test_t

  return predict_fn
class Gaussian(NamedTuple):
  """A `(mean, covariance)` convenience namedtuple.

  Attributes:
    mean:
      Mean of shape equal to the shape of the function outputs.
    covariance:
      Covariance of shape equal to the shape of the respective NTK/NNGP kernel.
  """
  # Plain arrays; the namedtuple performs no validation or broadcasting.
  mean: np.ndarray
  covariance: np.ndarray
def gp_inference(
k_train_train,
y_train: np.ndarray,
diag_reg: float = 0.,
diag_reg_absolute_scale: bool = False,
trace_axes: Axes = (-1,)):
r"""Compute the mean and variance of the 'posterior' of NNGP/NTK/NTKGP.
NNGP - the exact posterior of an infinitely wide Bayesian NN. NTK - exact
distribution of an infinite ensemble of infinitely wide NNs trained with
gradient flow for infinite time. NTKGP - posterior of a GP (Gaussian process)
with the NTK covariance (see
"`Bayesian Deep Ensembles via the Neural Tangent Kernel
<https://arxiv.org/abs/2007.05864>`_" for how this can correspond to infinite
ensembles of infinitely wide NNs as well).
Note that first invocation of the returned `predict_fn` will be slow and
allocate a lot of memory for its whole lifetime, as a Cholesky factorization
of `k_train_train.nngp` or `k_train_train.ntk` (or both) is performed and
cached for future invocations.
Args:
k_train_train:
train-train kernel. Can be (a) :class:`jax.numpy.ndarray`,
(b) `Kernel` namedtuple, (c) :class:`~neural_tangents.Kernel` object.
Must contain the necessary `nngp` and/or `ntk` kernels for arguments
provided to the returned `predict_fn` function. For example, if you
request to compute posterior test [only] NTK covariance in future
`predict_fn` invocations, `k_train_train` must contain both `ntk` and
`nngp` kernels.
y_train:
train targets.
diag_reg:
a scalar representing the strength of the diagonal regularization for
`k_train_train`, i.e. computing `k_train_train + diag_reg * I` during
Cholesky factorization.
diag_reg_absolute_scale:
`True` for `diag_reg` to represent regularization in absolute units,
`False` to be `diag_reg * np.mean(np.trace(k_train_train))`.
trace_axes:
`f(x_train)` axes such that `k_train_train`,
`k_test_train`[, and `k_test_test`] lack these pairs of dimensions and
are to be interpreted as :math:`\Theta \otimes I`, i.e. block-diagonal
along `trace_axes`. These can can be specified either to save space and
compute, or to even improve approximation accuracy of the infinite-width
or infinite-samples limit, since in these limits the covariance along
channel / feature / logit axes indeed converges to a constant-diagonal
matrix. However, if you target linearized dynamics of a specific
finite-width network, `trace_axes=()` will yield most accurate result.
Returns:
A function of signature `predict_fn(get, k_test_train, k_test_test)`
computing 'posterior' Gaussian distribution (mean or mean and covariance)
on a given test set.
"""
even, odd, first, last = _get_axes(_get_first(k_train_train))
trace_axes = utils.canonicalize_axis(trace_axes, y_train)
  # Cached factory of Cholesky solvers for the regularized `g` ("nngp"/"ntk")
  # train-train kernel. `lru_cache(2)` ensures each of the two possible
  # factorizations is computed at most once per `gp_inference` call.
  @lru_cache(2)
  def solve(g: str):
    k_dd = _get_attr(k_train_train, g)
    return _get_cho_solve(k_dd, diag_reg, diag_reg_absolute_scale)
  # Cached `k_train_train[g]^{-1} @ y_train` (contracted over `trace_axes`),
  # shared by all subsequent `predict_fn` invocations.
  @lru_cache(2)
  def k_inv_y(g: str):
    return solve(g)(y_train, trace_axes)
  @utils.get_namedtuple('Gaussians')
  def predict_fn(get: Optional[Get] = None,
                 k_test_train=None,
                 k_test_test=None
                 ) -> Dict[str, Union[np.ndarray, Gaussian]]:
    """`test`-set posterior given respective covariance matrices.
    Args:
      get:
        string, the mode of the Gaussian process, either "nngp", "ntk", "ntkgp",
        (see "`Bayesian Deep Ensembles via the Neural Tangent Kernel
        <https://arxiv.org/abs/2007.05864>`_") or a tuple, or `None`. If `None`
        then both `nngp` and `ntk` predictions are returned.
      k_test_train:
        test-train kernel. Can be (a) :class:`jax.numpy.ndarray`,
        (b) `Kernel` namedtuple, (c) :class:`~neural_tangents.Kernel` object.
        Must contain the necessary `nngp` and/or `ntk` kernels for arguments
        provided to the returned `predict_fn` function. For example, if you
        request to compute posterior test [only] NTK covariance, `k_test_train`
        must contain both `ntk` and `nngp` kernels. If `None`, returns
        predictions on the training set. Note that train-set outputs are always
        `N(y_train, 0)` and mostly returned for API consistency.
      k_test_test:
        test-test kernel. Can be (a) :class:`jax.numpy.ndarray`,
        (b) `Kernel` namedtuple, (c) :class:`~neural_tangents.Kernel` object.
        Must contain the necessary `nngp` and/or `ntk` kernels for arguments
        provided to the returned `predict_fn` function. Provide if you want to
        compute test-test posterior covariance. `k_test_test=None` means to not
        compute it. If `k_test_train is None`, pass any non-`None` value (e.g.
        `True`) if you want to get non-regularized (`diag_reg=0`) train-train
        posterior covariance. Note that non-regularized train-set outputs will
        always be the zero-variance Gaussian `N(y_train, 0)` and mostly
        returned for API consistency. For regularized train-set posterior
        outputs according to a positive `diag_reg`, pass
        `k_test_train=k_train_train`, and, optionally,
        `k_test_test=nngp_train_train`.
    Returns:
      Either a :class:`Gaussian` `(mean, variance)` namedtuple or `mean` of the
      GP posterior on the `test` set.
    """
    if get is None:
      get = ('nngp', 'ntk')
    out = {}
    for g in get:
      # "ntkgp" predictions use the NTK for the mean, so read the `ntk` kernel.
      k = g if g != 'ntkgp' else 'ntk'
      k_dd = _get_attr(k_train_train, k)
      k_td = None if k_test_train is None else _get_attr(k_test_train, k)
      if k_td is None:
        # Train set predictions.
        y = y_train.astype(k_dd.dtype)
      else:
        # Test set predictions: posterior mean `k_td @ k_dd^{-1} @ y_train`.
        y = np.tensordot(k_td, k_inv_y(k), (odd, first))
        y = np.moveaxis(y, range(-len(trace_axes), 0), trace_axes)
      if k_test_test is not None:
        if k_td is None:
          # Train-set posterior is the deterministic `N(y_train, 0)`.
          out[g] = Gaussian(y, np.zeros_like(k_dd, k_dd.dtype))
        else:
          if (g == 'ntk' and
              (not hasattr(k_train_train, 'nngp') or
               not hasattr(k_test_train, 'nngp'))):
            raise ValueError(
                'If `"ntk" in get`, and `k_test_test is not None`, '
                'and `k_test_train is not None`, i.e. you request the '
                'NTK posterior covariance on the test set, you need '
                'both NTK and NNGP train-train and test-train matrices '
                'contained in `k_test_train` and `k_train_train`. '
                'Hence they must be `namedtuple`s with `nngp` and '
                '`ntk` attributes.')
          # kernel of wide NN at initialization
          g_init = 'nngp' if g != 'ntkgp' else 'ntk'
          k_td_g_inv_y = solve(k)(_get_attr(k_test_train, g_init), even)
          k_tt = _get_attr(k_test_test, g_init)
          if g == 'nngp' or g == 'ntkgp':
            # Schur complement: `k_tt - k_td @ k_dd^{-1} @ k_td.T`.
            cov = np.tensordot(k_td, k_td_g_inv_y, (odd, first))
            cov = k_tt - utils.zip_axes(cov)
            out[g] = Gaussian(y, cov)
          elif g == 'ntk':
            # NTK posterior covariance mixes NTK and NNGP terms; see
            # https://arxiv.org/abs/1902.06720 for the derivation.
            term_1 = solve(g)(k_td, even)
            cov = np.tensordot(_get_attr(k_train_train, 'nngp'), term_1,
                               (odd, first))
            cov = np.tensordot(term_1, cov, (first, first))
            term_2 = np.tensordot(k_td, k_td_g_inv_y, (odd, first))
            term_2 += np.moveaxis(term_2, first, last)
            cov = utils.zip_axes(cov - term_2) + k_tt
            out[g] = Gaussian(y, cov)
          else:
            raise ValueError(g)
      else:
        # Covariance not requested: return mean only.
        out[g] = y
    return out
return predict_fn
# Lightweight `(nngp, ntk)` container with both fields defaulting to `None`,
# used to expose partially-filled kernel caches through the `get` API.
_Kernel = collections.namedtuple('Kernel', 'nngp ntk')
"""Helper type to fit cache dictionaries to `get` API."""
_Kernel.__new__.__defaults__ = (None,) * len(_Kernel._fields)
def gradient_descent_mse_ensemble(
    kernel_fn: KernelFn,
    x_train: np.ndarray,
    y_train: np.ndarray,
    learning_rate: float = 1.,
    diag_reg: float = 0.0,
    diag_reg_absolute_scale: bool = False,
    trace_axes: Axes = (-1,),
    **kernel_fn_train_train_kwargs):
  r"""Predicts the gaussian embedding induced by gradient descent on MSE loss.
  This is equivalent to an infinite ensemble of infinite-width networks after
  marginalizing out the initialization, if `kernel_fn` is the kernel function of
  the infinite-width network. Note that `kernel_fn` can in principle also be an
  empirical / Monte Carlo finite-width kernel function, but in this case the
  returned output will not have a simple interpretation (unless these functions
  are used to approximate the infinite-width kernel).
  Note that first invocation of the returned `predict_fn` will be slow and
  allocate a lot of memory for its whole lifetime, as the kernel computation,
  and either eigendecomposition (`t` is a scalar or an array) or Cholesky
  factorization (`t=None`) of `kernel_fn(x_train, None, get)` is performed and
  cached for future invocations (or both, if the function is called on both
  finite and infinite (`t=None`) times).
  Args:
    kernel_fn:
      A kernel function that computes NNGP and/or NTK. Must have a signature
      `kernel_fn(x1, x2, get, **kernel_fn_kwargs)` and return a
      :class:`~neural_tangents.Kernel` object or a `namedtuple` with `nngp`
      and/or `ntk` attributes. Therefore, it can be an `AnalyticKernelFn`, but
      also a `MonteCarloKernelFn`, or an `EmpiricalKernelFn` (but only
      `nt.empirical_kernel_fn` and not `nt.empirical_ntk_fn` or
      `nt.empirical_nngp_fn`, since the latter two do not accept a `get`
      argument). Note that for meaningful outputs, the kernel function must
      represent or at least approximate the infinite-width kernel.
    x_train:
      training inputs.
    y_train:
      training targets.
    learning_rate:
      learning rate, step size.
    diag_reg:
      a scalar representing the strength of the diagonal regularization for
      `kernel_fn(x_train, None, get)`, i.e. computing
      `kernel_fn(x_train, None, get) + diag_reg * I` during Cholesky
      factorization or eigendecomposition.
    diag_reg_absolute_scale:
      `True` for `diag_reg` to represent regularization in absolute units,
      `False` to be
      `diag_reg * np.mean(np.trace(kernel_fn(x_train, None, get)))`.
    trace_axes:
      `f(x_train)` axes such that `kernel_fn(x_train, None, get)`,
      `kernel_fn(x_test, x_train, get)`[, and `kernel_fn(x_test, None, get)`]
      lack these pairs of dimensions and are to be interpreted as
      :math:`\Theta \otimes I`, i.e. block-diagonal along `trace_axes`. These
      can can be specified either to save space and compute, or to even improve
      approximation accuracy of the infinite-width or infinite-samples limit,
      since in these limits the covariance along channel / feature / logit
      axes indeed converges to a constant-diagonal matrix. However, if you
      target linearized dynamics of a specific finite-width network,
      `trace_axes=()` will yield most accurate result.
    **kernel_fn_train_train_kwargs:
      optional keyword arguments passed to `kernel_fn`. For train-train kernel,
      these are passed to `kernel_fn` without changes. For test-test kernel,
      they are passed to `kernel_fn`, unless overwritten by a similar
      `**kernel_fn_test_test_kwargs` arguments passed to the `predict_fn`
      function call. Finally, for test-train kernel, values that are tuples of
      arrays (destined for calls of the finite-width network on training and
      testing data) will be tuples of values combined from
      `**kernel_fn_train_train_kwargs` and `**kernel_fn_test_test_kwargs`, and
      all other values must match.
  Returns:
    A function with signature `predict_fn(t, x_test, get, compute_cov)`
    returning either mean or mean and covariance of the infinite ensemble of
    infinite-width networks outputs on `x_test` at time[s] `t`, in the `get`
    regime (`"nngp"`, `"ntk"`, or `("nngp", "ntk")`).
  """
  expm1 = _make_expm1_fn(y_train.size)
  inv_expm1 = _make_inv_expm1_fn(y_train.size)
  # Canonicalize `trace_axes` to negative indices so they are stable under
  # prepending a leading time dimension.
  trace_axes = utils.canonicalize_axis(trace_axes, y_train)
  trace_axes = tuple(-y_train.ndim + a for a in trace_axes)
  n_trace_axes = len(trace_axes)
  last_t_axes = range(-n_trace_axes, 0)
  trace_shape = tuple(y_train.shape[a] for a in trace_axes)
  # Flatten non-trace axes of targets into a single leading dimension.
  y_train_flat = np.moveaxis(y_train, trace_axes, last_t_axes).reshape(
      (-1,) + trace_shape)
  # Cache of computed train-train kernels, keyed by "nngp"/"ntk".
  k_dd_cache = {}
  def get_k_train_train(get: Tuple[str, ...]) -> _Kernel:
    # Populate `k_dd_cache` for the requested kernels, computing each at most
    # once (and computing both in a single `kernel_fn` call when possible).
    if len(get) == 1:
      get = get[0]
      if get not in k_dd_cache:
        k_dd_cache[get] = kernel_fn(x_train, None, get,
                                    **kernel_fn_train_train_kwargs)
    elif len(get) == 2:
      if not any(g in k_dd_cache for g in get):
        k_dd_cache.update(
            kernel_fn(x_train, None, get,
                      **kernel_fn_train_train_kwargs)._asdict())  # pytype: disable=attribute-error  # jax-ndarray
      else:
        for g in get:
          if g not in k_dd_cache:
            k_dd_cache[g] = kernel_fn(x_train, None, g,
                                      **kernel_fn_train_train_kwargs)
    else:
      raise ValueError(get)
    return _Kernel(**k_dd_cache)
  # Cached eigendecomposition of the regularized train-train kernel, used for
  # finite-time (`t` is not `None`) predictions.
  @lru_cache(2)
  def eigenspace(get: str):
    k_dd = getattr(get_k_train_train((get,)), get)
    k_dd = _add_diagonal_regularizer(utils.make_2d(k_dd), diag_reg,
                                     diag_reg_absolute_scale)
    evals, evecs = np.linalg.eigh(k_dd)
    evals = np.expand_dims(evals, 0)
    return evals, evecs
  # Cached infinite-time (`t=None`) predictor, backed by `gp_inference`.
  @lru_cache(4)
  def predict_inf(get: Get):
    _, get = utils.canonicalize_get(get)
    k_dd = get_k_train_train(get)
    return gp_inference(k_dd, y_train, diag_reg, diag_reg_absolute_scale,
                        trace_axes)
  def get_kernels(get: Get, x_test: Optional[np.ndarray],
                  compute_cov: bool,
                  **kernel_fn_test_test_kwargs):
    # Compute (train-train, test-train, test-test) kernels, merging
    # train-time and test-time `kernel_fn` keyword arguments for the
    # test-train kernel as documented on the parent function.
    get = _get_dependency(get, compute_cov)
    k_dd = get_k_train_train(get)
    if x_test is None:
      k_td = None
      nngp_tt = compute_cov or None
    else:
      args_train, _ = utils.split_kwargs(kernel_fn_train_train_kwargs, x_train)
      args_test, _ = utils.split_kwargs(kernel_fn_test_test_kwargs, x_test)
      def is_array(x):
        return tree_all(tree_map(
            lambda x: isinstance(x, (onp.ndarray, np.ndarray)), x))
      kwargs_td = dict(kernel_fn_train_train_kwargs)
      kwargs_tt = dict(kernel_fn_train_train_kwargs)
      for k in kernel_fn_test_test_kwargs:
        v_tt = kernel_fn_test_test_kwargs[k]
        v_dd = kernel_fn_train_train_kwargs[k]
        if is_array(v_dd) and is_array(v_tt):
          if (isinstance(v_dd, tuple) and len(v_dd) == 2 and
              isinstance(v_tt, tuple) and len(v_tt) == 2):
            # Pairs of arrays are interpreted as (test, train) destined
            # values and recombined accordingly for the test-train kernel.
            v_td = (args_test[k], args_train[k])
          else:
            v_td = v_tt
        elif v_dd != v_tt:
          raise ValueError(f'Same keyword argument {k} of `kernel_fn` is set to'
                           f'different values {v_dd} != {v_tt} when computing '
                           f'the train-train and test-train/test-test kernels. '
                           f'If this is your intention, please submit a feature'
                           f' request at '
                           f'https://github.com/google/neural-tangents/issues')
        else:
          v_td = v_tt
        kwargs_td[k] = v_td
        kwargs_tt[k] = v_tt
      k_td = kernel_fn(x_test, x_train, get, **kwargs_td)
      if compute_cov:
        nngp_tt = kernel_fn(x_test, None, 'nngp', **kwargs_tt)
      else:
        nngp_tt = None
    return k_dd, k_td, nngp_tt
  @utils.get_namedtuple('Gaussians')
  def predict_fn(t: Optional[ArrayOrScalar] = None,
                 x_test: Optional[np.ndarray] = None,
                 get: Optional[Get] = None,
                 compute_cov: bool = False,
                 **kernel_fn_test_test_kwargs) -> Dict[str, Gaussian]:
    """Return output mean and covariance on the test set at time[s] `t`.
    Args:
      t:
        a scalar of array of scalars of any shape. `t=None` is treated as
        infinity and returns the same result as `t=np.inf`, but is computed
        using linear solve for test predictions instead of eigendecomposition,
        saving time and precision.
      x_test:
        test inputs. `None` means to return non-regularized (`diag_reg=0`)
        predictions on the train-set inputs. For regularized predictions, pass
        `x_test=x_train`.
      get:
        string, the mode of the Gaussian process, either "nngp" or "ntk", or a
        tuple. `get=None` is equivalent to `get=("nngp", "ntk")`.
      compute_cov:
        if `True` computing both `mean` and `variance` and only `mean`
        otherwise.
      **kernel_fn_test_test_kwargs:
        optional keyword arguments passed to `kernel_fn`. See also
        `kernel_fn_train_train_kwargs` argument of the parent function.
    Returns:
      `fx_test_mean_t` or `(fx_test_mean_t, fx_test_cov_t)` if
      `compute_cov == True` with potentially additional leading time dimensions.
    """
    if get is None:
      get = ('nngp', 'ntk')
    # train-train, test-train, test-test.
    k_dd, k_td, nngp_tt = get_kernels(get, x_test, compute_cov,
                                      **kernel_fn_test_test_kwargs)
    # Infinite time.
    if t is None:
      return predict_inf(get)(get=get, k_test_train=k_td,
                              k_test_test=nngp_tt)
    # Finite time.
    t = np.array(t) * learning_rate
    t_shape = t.shape
    t = t.reshape((-1, 1))
    def reshape_mean(mean):
      # Restore time and trace axes of the flattened mean.
      k = _get_first(k_dd if k_td is None else k_td)
      mean = mean.reshape(t_shape + k.shape[::2] + trace_shape)
      mean = np.moveaxis(mean, last_t_axes, trace_axes)
      return mean
    def reshape_cov(cov):
      # Restore time axes and interleave the two covariance copies of the
      # spatial axes.
      k = _get_first(k_dd if k_td is None else k_td)
      cov_shape_t = t_shape + k.shape[::2] * 2
      return utils.zip_axes(cov.reshape(cov_shape_t), len(t_shape))
    out = {}
    for g in get:
      evals, evecs = eigenspace(g)
      # Training set.
      if k_td is None:
        mean = np.einsum(
            'ji,ti,ki,k...->tj...',
            evecs, -expm1(evals, t), evecs, y_train_flat,
            optimize=_optimize())
      # Test set.
      else:
        neg_inv_expm1 = -inv_expm1(evals, t)
        ktd_g = utils.make_2d(getattr(k_td, g))
        mean = np.einsum(
            'lj,ji,ti,ki,k...->tl...',
            ktd_g, evecs, neg_inv_expm1, evecs, y_train_flat,
            optimize=_optimize())
      mean = reshape_mean(mean)
      if nngp_tt is not None:
        nngp_dd = utils.make_2d(k_dd.nngp)
        # Training set.
        if k_td is None:
          if g == 'nngp':
            cov = np.einsum(
                'ji,ti,ki->tjk',
                evecs,
                (np.maximum(evals, 0.) *
                 np.exp(- 2 * np.maximum(evals, 0.) * t / y_train.size)),
                evecs,
                optimize=_optimize())
          elif g == 'ntk':
            exp = np.einsum(
                'mi,ti,ki->tmk',
                evecs,
                np.exp(-np.maximum(evals, 0.) * t / y_train.size),
                evecs,
                optimize=_optimize())
            cov = np.einsum(
                'tmk,kl,tnl->tmn',
                exp,
                nngp_dd,
                exp,
                optimize=_optimize())
          else:
            raise ValueError(g)
        # Test set.
        else:
          _nngp_tt = np.expand_dims(utils.make_2d(nngp_tt), 0)
          if g == 'nngp':
            cov = _nngp_tt - np.einsum(
                'mj,ji,ti,ki,lk->tml',
                ktd_g, evecs, -inv_expm1(evals, 2 * t), evecs, ktd_g,
                optimize=_optimize())
          elif g == 'ntk':
            term_1 = np.einsum(
                'mi,ti,ki,lk->tml',
                evecs, neg_inv_expm1, evecs, ktd_g,
                optimize=_optimize())
            term_2 = np.einsum(
                'mj,ji,ti,ki,lk->tml',
                ktd_g, evecs, neg_inv_expm1, evecs, utils.make_2d(k_td.nngp),
                optimize=_optimize())
            term_2 += np.moveaxis(term_2, 1, 2)
            cov = np.einsum(
                'tji,jk,tkl->til',
                term_1, nngp_dd, term_1,
                optimize=_optimize())
            cov += -term_2 + _nngp_tt
          else:
            raise ValueError(g)
        out[g] = Gaussian(mean, reshape_cov(cov))
      else:
        out[g] = mean
    return out
  return predict_fn
def max_learning_rate(
    ntk_train_train: np.ndarray,
    y_train_size: Optional[int] = None,
    momentum=0.,
    eps: float = 1e-12) -> float:
  r"""Computes the maximal feasible learning rate for infinite width NNs.
  The network is assumed to be trained using mini-/full-batch GD + momentum
  with mean squared loss. The loss is assumed to have the form
  `1/(2 * batch_size * output_size) \|f(train_x) - train_y\|^2`. For vanilla SGD
  (i.e. `momentum = 0`) the maximal feasible learning rate is the largest `\eta`
  such that the operator `(I - \eta / (batch_size * output_size) * NTK)` is a
  contraction, which is `2 * batch_size * output_size * lambda_max(NTK)`. When
  `momentum > 0`, we use
  `2 * (1 + momentum) * batch_size * output_size * lambda_max(NTK)` (see
  *The Dynamics of Momentum* section in
  "`Why Momentum Really Works <https://distill.pub/2017/momentum/>`_").
  Args:
    ntk_train_train:
      analytic or empirical NTK on the training data.
    y_train_size:
      total training set output size, i.e.
      `f(x_train).size == y_train.size`. If `output_size=None` it is inferred
      from `ntk_train_train.shape` assuming `trace_axes=()`.
    momentum:
      The `momentum` for momentum optimizers.
    eps:
      a float to avoid zero divisor.
  Returns:
    The maximal feasible learning rate for infinite width NNs.
  """
  ntk_train_train = utils.make_2d(ntk_train_train)
  factor = ntk_train_train.shape[0] if y_train_size is None else y_train_size  # pytype: disable=attribute-error  # jax-ndarray
  if _is_on_cpu(ntk_train_train):
    # Only the largest eigenvalue is needed. Note: the `eigvals=(lo, hi)`
    # keyword was deprecated and removed from `scipy.linalg.eigvalsh` in
    # SciPy 1.9; `subset_by_index` (available since SciPy 1.5) is its
    # replacement.
    n = ntk_train_train.shape[0]  # pytype: disable=attribute-error  # jax-ndarray
    max_eva = osp.linalg.eigvalsh(ntk_train_train,
                                  subset_by_index=(n - 1, n - 1))[-1]
  else:
    # On accelerators, compute the full spectrum (partial eigensolvers are
    # not available) and take the largest eigenvalue.
    max_eva = np.linalg.eigvalsh(ntk_train_train)[-1]
  lr = 2 * (1 + momentum) * factor / (max_eva + eps)
  return lr
# INTERNAL UTILITIES
def _optimize() -> str:
"""Return contraction order for `np.einsum` based on platform.
Introduced after https://github.com/google/jax/pull/7512 since TPU seems to
be more precise in `greeedy` mode.
"""
return 'greedy' if jax.default_backend() == 'tpu' else 'optimal'
def _get_dependency(get: Get, compute_cov: bool) -> Tuple[str, ...]:
  """Figure out which kernels ("nngp"/"ntk") a `get` request depends on.

  NTK covariance computations additionally require the NNGP kernel, hence
  `compute_cov` can pull in `"nngp"` even when only `"ntk"` is requested.
  """
  _, get = utils.canonicalize_get(get)
  for g in get:
    if g not in ('nngp', 'ntk'):
      raise NotImplementedError(
          'Can only get either "nngp" or "ntk" predictions, got %s.' % g)
  dependencies = []
  if 'nngp' in get or ('ntk' in get and compute_cov):
    dependencies.append('nngp')
  if 'ntk' in get:
    dependencies.append('ntk')
  return tuple(dependencies)
def _get_fns_in_eigenbasis(
    k_train_train: np.ndarray,
    diag_reg: float,
    diag_reg_absolute_scale: bool,
    fns: Iterable[Callable[[np.ndarray, np.ndarray], np.ndarray]]
) -> Generator[Callable[[np.ndarray, np.ndarray], np.ndarray], None, None]:
  """Build functions of a matrix in its eigenbasis.
  Args:
    k_train_train:
      an n x n matrix.
    diag_reg:
      diagonal regularizer strength.
    diag_reg_absolute_scale:
      `True` to use absolute (vs relative to mean trace) regulatization.
    fns:
      a sequence of functions that act on the eigenvalues, `(evals, dt) ->
      modified_evals`.
  Returns:
    A generator of functions that act as functions of the matrix mat
    acting on vectors: `transform(vec, dt) = fn(mat, dt) @ vec`.
  """
  # Eigendecompose the (regularized) 2D form of the kernel once; all returned
  # transforms share this decomposition.
  k_train_train = utils.make_2d(k_train_train)
  k_train_train = _add_diagonal_regularizer(k_train_train, diag_reg,
                                            diag_reg_absolute_scale)
  evals, evecs = np.linalg.eigh(k_train_train)
  # Leading singleton axis broadcasts against the time axis `t` below.
  evals = np.expand_dims(evals, 0)
  def to_eigenbasis(fn):
    """Generates a transform given a function on the eigenvalues."""
    def new_fn(y_train, t):
      # `evecs @ diag(fn(evals, t)) @ evecs.T @ y_train`, batched over `t`.
      return np.einsum('ji,ti,ki,k...->tj...',
                       evecs, fn(evals, t), evecs, y_train,
                       optimize=_optimize())
    return new_fn
  return (to_eigenbasis(fn) for fn in fns)
def _add_diagonal_regularizer(A: np.ndarray,
diag_reg: float,
diag_reg_absolute_scale: bool) -> np.ndarray:
dimension = A.shape[0]
if not diag_reg_absolute_scale:
diag_reg *= np.trace(A) / dimension
return A + diag_reg * np.eye(dimension)
def _get_cho_solve(A: np.ndarray,
                   diag_reg: float,
                   diag_reg_absolute_scale: bool,
                   lower: bool = False) -> Callable[[np.ndarray, Axes],
                                                    np.ndarray]:
  """Cholesky-factorize regularized `A` and return a reusable solver.

  The returned closure solves `A x = b` for arbitrary right-hand sides,
  contracting `b` over the given `b_axes` and restoring the non-channel
  shape of `A` in the result. The factorization is performed once, here.
  """
  # Odd axes of the interleaved kernel are the "non-channel" output axes.
  x_non_channel_shape = A.shape[1::2]
  A = utils.make_2d(A)
  A = _add_diagonal_regularizer(A, diag_reg, diag_reg_absolute_scale)
  C = sp.linalg.cho_factor(A, lower)
  def cho_solve(b: np.ndarray, b_axes: Axes) -> np.ndarray:
    b_axes = utils.canonicalize_axis(b_axes, b)
    last_b_axes = range(-len(b_axes), 0)
    x_shape = x_non_channel_shape + tuple(b.shape[a] for a in b_axes)
    # Flatten `b` to a 2D right-hand side with `b_axes` trailing, solve,
    # then restore the target shape.
    b = np.moveaxis(b, b_axes, last_b_axes)
    b = b.reshape((A.shape[1], -1))
    x = sp.linalg.cho_solve(C, b)
    x = x.reshape(x_shape)
    return x
  return cho_solve
def _get_fx_test_shape(y_train: np.ndarray,
                       k_test_train: np.ndarray,
                       y_axes: Axes) -> Tuple[int, ...]:
  """Shape of test-set outputs given targets and the test-train kernel.

  Takes the even (test) axes of `k_test_train` and re-inserts the sizes of
  `y_train`'s `y_axes` (e.g. logit axes) at their original positions. If
  `k_test_train` is `None` (train-set predictions), the shape is simply that
  of `y_train`.
  """
  # NOTE(review): `k_test_train` may be `None` despite the annotation.
  if k_test_train is None:
    return y_train.shape
  out_shape = list(k_test_train.shape[::2])
  axes = utils.canonicalize_axis(y_axes, y_train)
  for axis, size in enumerate(y_train.shape):
    if axis in axes:
      out_shape.insert(axis, size)
  return tuple(out_shape)
def _make_expm1_fn(normalization: float):
def expm1_fn(evals: np.ndarray, t: np.ndarray):
# Since our matrix really should be positive semidefinite,
# we can threshold the eigenvalues to squash ones that are negative
# for numerical reasons.
return np.expm1(-np.maximum(evals, 0.) * t / normalization)
return expm1_fn
def _make_inv_expm1_fn(normalization: float):
  """Return `fn(evals, t) = expm1(-max(evals, 0) * t / normalization) / |evals|`."""
  expm1_fn = _make_expm1_fn(normalization)
  def _inv_expm1_fn(evals: np.ndarray, t: np.ndarray):
    # NOTE(review): a zero eigenvalue yields 0/0 = nan here (the analytic
    # limit would be `-t / normalization`) — presumably excluded upstream by
    # diagonal regularization; confirm before relying on `diag_reg=0`.
    return expm1_fn(evals, t) / np.abs(evals)
  return _inv_expm1_fn
def _check_inputs(fx_train_or_state_0: Union[ArrayOrScalar, ODEState],
                  fx_test_0: ArrayOrScalar,
                  k_test_train: Optional[np.ndarray]):
  """Validate initial outputs and kernels passed to a prediction function.

  Raises `ValueError` if the combination of initial train/test outputs and
  the test-train kernel is inconsistent; returns nothing on success.
  """
  if isinstance(fx_train_or_state_0, ODEState):
    # An `ODEState` already carries both train and test outputs, so a
    # separate `fx_test_0` would be ambiguous.
    if fx_test_0 is not None:
      raise ValueError('`fx_test_0` is included in `ODEState` and must be set '
                       'to `None`.')
    fx_train_0 = fx_train_or_state_0.fx_train
    fx_test_0 = fx_train_or_state_0.fx_test
  else:
    fx_train_0 = fx_train_or_state_0
  if fx_train_0 is None and fx_test_0 is None:
    raise ValueError('Both `fx_train_0` and `fx_test_0` are `None`, i.e. no '
                     'predictions will be computed.')
  if fx_test_0 is not None and k_test_train is None:
    raise ValueError('To get predictions on the test set, please provide '
                     '`k_test_train` kernel to the parent function.')
def _get_axes(x: np.ndarray):
n = x.ndim
return (
tuple(range(0, n, 2)),
tuple(range(1, n, 2)),
tuple(range(0, n // 2)),
tuple(range(n // 2, n))
)
def _get_first(k) -> np.ndarray:
if isinstance(k, (onp.ndarray, np.ndarray)):
return k
for g in ('nngp', 'ntk'):
if hasattr(k, g):
v = getattr(k, g)
if v is not None:
return v
raise ValueError(k)
def _get_attr(k, g: str) -> np.ndarray:
if isinstance(k, (onp.ndarray, np.ndarray)):
return k
return getattr(k, g)
def _is_on_cpu(x: PyTree) -> bool:
  """`True` iff every array leaf of the pytree `x` resides in CPU memory."""
  def leaf_on_cpu(leaf: np.ndarray) -> bool:
    # TODO(romann): revisit when https://github.com/google/jax/issues/1431 and
    # https://github.com/google/jax/issues/1432 are fixed.
    if hasattr(leaf, 'device_buffer'):
      return 'cpu' in str(leaf.device_buffer.device()).lower()
    if isinstance(leaf, (onp.ndarray, np.ndarray)):
      # Plain numpy arrays always live in host (CPU) memory.
      return True
    raise NotImplementedError(type(leaf))
  return tree_all(tree_map(leaf_on_cpu, x))
| 49,629 | 35.817507 | 127 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/monte_carlo.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function to compute Monte Carlo NNGP and NTK estimates.
This module contains a function `monte_carlo_kernel_fn` that allow to compute
Monte Carlo estimates of NNGP and NTK kernels of arbitrary functions. For more
details on how individual samples are computed, refer to `utils/empirical.py`.
Note that the `monte_carlo_kernel_fn` accepts arguments like `batch_size`,
`device_count`, and `store_on_device`, and is appropriately batched /
parallelized. You don't need to apply the :obj:`~neural_tangents.batch` or
:obj:`jax.jit` decorators to it. Further, you do not need to apply
:obj:`jax.jit` to the input `apply_fn` function, as the resulting empirical
kernel function is JITted internally.
"""
from functools import partial
import operator
from typing import Generator, Iterable, Optional, Set, Tuple, Union
from .batching import batch
from .empirical import empirical_kernel_fn, NtkImplementation, DEFAULT_NTK_IMPLEMENTATION, _DEFAULT_NTK_FWD, _DEFAULT_NTK_S_RULES, _DEFAULT_NTK_J_RULES
from jax import random
import jax.numpy as np
from jax.tree_util import tree_map
from .utils import utils
from .utils.typing import ApplyFn, Axes, EmpiricalGetKernelFn, Get, InitFn, MonteCarloKernelFn, NTTree, PyTree, VMapAxes
def _sample_once_kernel_fn(
    kernel_fn: EmpiricalGetKernelFn,
    init_fn: InitFn,
    batch_size: int = 0,
    device_count: int = -1,
    store_on_device: bool = True
):
  """Wrap an empirical `kernel_fn` into a batched single-draw MC sampler.

  The returned function draws fresh network parameters from `init_fn` using
  the provided RNG key and evaluates `kernel_fn` on them, with computation
  batched / parallelized according to the given settings.
  """
  def kernel_fn_sample_once(
      x1: NTTree[np.ndarray],
      x2: Optional[NTTree[np.ndarray]],
      key: random.KeyArray,
      get: Get,
      **apply_fn_kwargs):
    # Separate keys: one to draw parameters, one for stochastic ops
    # (e.g. dropout) inside `kernel_fn`.
    init_key, dropout_key = random.split(key, 2)
    input_shape = tree_map(lambda x: x.shape, x1)
    _, params = init_fn(init_key, input_shape)
    return kernel_fn(x1, x2, get, params, rng=dropout_key, **apply_fn_kwargs)
  return batch(kernel_fn_sample_once,
               batch_size=batch_size,
               device_count=device_count,
               store_on_device=store_on_device)
def _sample_many_kernel_fn(
    kernel_fn_sample_once,
    key: random.KeyArray,
    n_samples: Set[int],
    get_generator: bool):
  """Average single-draw MC kernel samples over multiple network draws.

  Returns either a generator yielding running-average estimates at each count
  in `n_samples` (`get_generator=True`), or a function returning the final
  `max(n_samples)`-sample average (`get_generator=False`).
  """
  def normalize(sample: PyTree, n: int) -> PyTree:
    # Turn a running sum of `n` samples into an average.
    return tree_map(lambda sample: sample / n, sample)
  def get_samples(
      x1: NTTree[np.ndarray],
      x2: Optional[NTTree[np.ndarray]],
      get: Get,
      **apply_fn_kwargs):
    # Yield `(n, running_sum_of_n_samples)` for n = 1 .. max(n_samples),
    # splitting a fresh RNG key per draw.
    _key = key
    ker_sampled = None
    for n in range(1, max(n_samples) + 1):
      _key, split = random.split(_key)
      one_sample = kernel_fn_sample_once(x1, x2, split, get, **apply_fn_kwargs)
      if ker_sampled is None:
        ker_sampled = one_sample
      else:
        ker_sampled = tree_map(operator.add, ker_sampled, one_sample)
      yield n, ker_sampled
  if get_generator:
    @utils.get_namedtuple('MonteCarloKernel')
    def get_sampled_kernel(
        x1: np.ndarray,
        x2: np.ndarray,
        get: Optional[Get] = None,
        **apply_fn_kwargs
    ) -> Generator[Union[np.ndarray, Tuple[np.ndarray, ...]], None, None]:
      # Yield the average only at the requested sample counts.
      for n, sample in get_samples(x1, x2, get, **apply_fn_kwargs):
        if n in n_samples:
          yield normalize(sample, n)
  else:
    @utils.get_namedtuple('MonteCarloKernel')
    def get_sampled_kernel(
        x1: np.ndarray,
        x2: np.ndarray,
        get: Optional[Get] = None,
        **apply_fn_kwargs
    ) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
      # Exhaust the generator; `sample`/`n` hold the final running sum/count.
      for n, sample in get_samples(x1, x2, get, **apply_fn_kwargs):
        pass
      return normalize(sample, n)
  return get_sampled_kernel
def monte_carlo_kernel_fn(
    init_fn: InitFn,
    apply_fn: ApplyFn,
    key: random.KeyArray,
    n_samples: Union[int, Iterable[int]],
    batch_size: int = 0,
    device_count: int = -1,
    store_on_device: bool = True,
    trace_axes: Axes = (-1,),
    diagonal_axes: Axes = (),
    vmap_axes: Optional[VMapAxes] = None,
    implementation: Union[int, NtkImplementation] = DEFAULT_NTK_IMPLEMENTATION,
    _j_rules: bool = _DEFAULT_NTK_J_RULES,
    _s_rules: bool = _DEFAULT_NTK_S_RULES,
    _fwd: Optional[bool] = _DEFAULT_NTK_FWD,
) -> MonteCarloKernelFn:
  r"""Return a Monte Carlo sampler of NTK and NNGP kernels of a given function.
  Note that the returned function is appropriately batched / parallelized. You
  don't need to apply the `nt.batch` or `jax.jit` decorators to it. Further,
  you do not need to apply `jax.jit` to the input `apply_fn` function, as the
  resulting empirical kernel function is JITted internally.
  Args:
    init_fn:
      a function initializing parameters of the neural network. From
      :obj:`jax.example_libraries.stax`: "takes an rng key and an input shape
      and returns an `(output_shape, params)` pair".
    apply_fn:
      a function computing the output of the neural network.
      From :obj:`jax.example_libraries.stax`: "takes params, inputs, and an
      rng key and applies the layer".
    key:
      RNG (`jax.random.PRNGKey`) for sampling random networks. Must have
      shape `(2,)`.
    n_samples:
      number of Monte Carlo samples. Can be either an integer or an
      iterable of integers at which the resulting generator will yield
      estimates. Example: use `n_samples=[2**k for k in range(10)]` for the
      generator to yield estimates using 1, 2, 4, ..., 512 Monte Carlo samples.
    batch_size: an integer making the kernel computed in batches of `x1` and
      `x2` of this size. `0` means computing the whole kernel. Must divide
      `x1.shape[0]` and `x2.shape[0]`.
    device_count:
      an integer making the kernel be computed in parallel across
      this number of devices (e.g. GPUs or TPU cores). `-1` means use all
      available devices. `0` means compute on a single device sequentially. If
      not `0`, must divide `x1.shape[0]`.
    store_on_device:
      a boolean, indicating whether to store the resulting
      kernel on the device (e.g. GPU or TPU), or in the CPU RAM, where larger
      kernels may fit.
    trace_axes:
      output axes to trace the output kernel over, i.e. compute only the trace
      of the covariance along the respective pair of axes (one pair for each
      axis in `trace_axes`). This allows to save space and compute if you are
      only interested in the respective trace, but also improve approximation
      accuracy if you know that covariance along these pairs of axes converges
      to a `constant * identity matrix` in the limit of interest (e.g.
      infinite width or infinite `n_samples`). A common use case is the channel
      / feature / logit axis, since activation slices along such axis are i.i.d.
      and the respective covariance along the respective pair of axes indeed
      converges to a constant-diagonal matrix in the infinite width or infinite
      `n_samples` limit.
      Also related to "contracting dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
    diagonal_axes:
      output axes to diagonalize the output kernel over, i.e. compute only the
      diagonal of the covariance along the respective pair of axes (one pair for
      each axis in `diagonal_axes`). This allows to save space and compute, if
      off-diagonal values along these axes are not needed, but also improve
      approximation accuracy if their limiting value is known theoretically,
      e.g. if they vanish in the limit of interest (e.g. infinite
      width or infinite `n_samples`). If you further know that on-diagonal
      values converge to the same constant in your limit of interest, you should
      specify these axes in `trace_axes` instead, to save even more compute and
      gain even more accuracy. A common use case is computing the variance
      (instead of covariance) along certain axes.
      Also related to "batch dimensions" in XLA terms.
      (https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
    vmap_axes:
      applicable only to NTK. A triple of `(in_axes, out_axes, kwargs_axes)`
      passed to `vmap` to evaluate the empirical NTK in parallel ove these axes.
      Precisely, providing this argument implies that `f(params, x, **kwargs)`
      equals to a concatenation along `out_axes` of `f` applied to slices of
      `x` and `**kwargs` along `in_axes` and `kwargs_axes`, i.e. `f` can be
      evaluated as a `vmap`. This allows to evaluate Jacobians much more
      efficiently. If `vmap_axes` is not a triple, it is interpreted as
      `in_axes = out_axes = vmap_axes, kwargs_axes = {}`. For example a very
      common usecase is `vmap_axes=0` for a neural network with leading (`0`)
      batch dimension, both for inputs and outputs, and no interactions between
      different elements of the batch (e.g. no BatchNorm, and, in the case of
      `nt.stax`, also no Dropout). However, if there is interaction between
      batch elements or no concept of a batch axis at all, `vmap_axes` must be
      set to `None`, to avoid wrong (and potentially silent) results.
    implementation:
      Applicable only to NTK, an :class:`NtkImplementation` value (or an
      :class:`int` `0`, `1`, `2`, or `3`). See the :class:`NtkImplementation`
      docstring for details.
    _j_rules:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      custom Jacobian rules for intermediary primitive `dy/dw` computations for
      MJJMPs (matrix-Jacobian-Jacobian-matrix products). Set to `False` to use
      JVPs or VJPs, via JAX's :obj:`jax.jacfwd` or :obj:`jax.jacrev`. Custom
      Jacobian rules (`True`) are expected to be not worse, and sometimes better
      than automated alternatives, but in case of a suboptimal implementation
      setting it to `False` could improve performance.
    _s_rules:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      efficient MJJMp rules for structured `dy/dw` primitive Jacobians. In
      practice should be set to `True`, and setting it to `False` can lead to
      dramatic deterioration of performance.
    _fwd:
      Internal debugging parameter, applicable only to NTK when
      `implementation` is :attr:`~NtkImplementation.STRUCTURED_DERIVATIVES`
      (`3`) or :attr:`~NtkImplementation.AUTO` (`0`). Set to `True` to allow
      :obj:`jax.jvp` in intermediary primitive Jacobian `dy/dw` computations,
      `False` to always use :obj:`jax.vjp`. `None` to decide automatically
      based on input/output sizes. Applicable when `_j_rules=False`, or when a
      primitive does not have a Jacobian rule. Should be set to `None` for best
      performance.
  Returns:
    If `n_samples` is an integer, returns a function of signature
    `kernel_fn(x1, x2, get)` that returns an MC estimation of the kernel using
    `n_samples`. If `n_samples` is a collection of integers,
    `kernel_fn(x1, x2, get)` returns a generator that yields estimates using
    `n` samples for `n in n_samples`.
  Example:
    >>> from jax import random
    >>> import neural_tangents as nt
    >>> from neural_tangents import stax
    >>> #
    >>> key1, key2 = random.split(random.PRNGKey(1), 2)
    >>> x_train = random.normal(key1, (20, 32, 32, 3))
    >>> y_train = random.uniform(key1, (20, 10))
    >>> x_test = random.normal(key2, (5, 32, 32, 3))
    >>> #
    >>> init_fn, apply_fn, _ = stax.serial(
    >>>     stax.Conv(128, (3, 3)),
    >>>     stax.Relu(),
    >>>     stax.Conv(256, (3, 3)),
    >>>     stax.Relu(),
    >>>     stax.Conv(512, (3, 3)),
    >>>     stax.Flatten(),
    >>>     stax.Dense(10)
    >>> )
    >>> #
    >>> n_samples = 200
    >>> kernel_fn = nt.monte_carlo_kernel_fn(init_fn, apply_fn, key1, n_samples)
    >>> kernel = kernel_fn(x_train, x_test, get=('nngp', 'ntk'))
    >>> # `kernel` is a tuple of NNGP and NTK MC estimate using `n_samples`.
    >>> #
    >>> n_samples = [1, 10, 100, 1000]
    >>> kernel_fn_generator = nt.monte_carlo_kernel_fn(init_fn, apply_fn, key1,
    >>>                                                n_samples)
    >>> kernel_samples = kernel_fn_generator(x_train, x_test,
    >>>                                      get=('nngp', 'ntk'))
    >>> for n, kernel in zip(n_samples, kernel_samples):
    >>>   print(n, kernel)
    >>>   # `kernel` is a tuple of NNGP and NTK MC estimate using `n` samples.
  """
  # Build a finite-width empirical kernel function with the requested
  # contraction / differentiation settings.
  kwargs = dict(
      f=apply_fn,
      trace_axes=trace_axes,
      diagonal_axes=diagonal_axes,
      vmap_axes=vmap_axes,
      implementation=implementation,
      _s_rules=_s_rules,
      _j_rules=_j_rules,
      _fwd=_fwd
  )
  kernel_fn = empirical_kernel_fn(**kwargs)
  # Wrap it into a batched sampler that draws one random network per call.
  kernel_fn_sample_once = _sample_once_kernel_fn(
      kernel_fn=kernel_fn,
      init_fn=init_fn,
      batch_size=batch_size,
      device_count=device_count,
      store_on_device=store_on_device
  )
  # Normalize `n_samples` into a set of counts and decide whether the caller
  # gets a single estimate or a generator of estimates.
  n_samples, get_generator = _canonicalize_n_samples(n_samples)
  kernel_fn = _sample_many_kernel_fn(
      kernel_fn_sample_once=kernel_fn_sample_once,
      key=key,
      n_samples=n_samples,
      get_generator=get_generator
  )
  return kernel_fn
def _canonicalize_n_samples(
n_samples: Union[int, Iterable[int]]) -> Tuple[Set[int], bool]:
get_generator = True
if isinstance(n_samples, int):
get_generator = False
n_samples = (n_samples,)
if hasattr(n_samples, '__iter__'):
n_samples = set(n_samples)
if not all(isinstance(n, int) for n in n_samples):
raise ValueError(f'`n_samples` must contain only integers, '
f'got {n_samples}.')
if any(n <= 0 for n in n_samples):
raise ValueError(f'`n_samples` must be positive, got {n_samples}.')
else:
raise TypeError(f'`n_samples` must be either an integer of a set of '
f'integers, got {type(n_samples)}.')
return n_samples, get_generator
| 14,540 | 40.784483 | 151 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/stax/combinators.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer combinators."""
import operator as op
from typing import Any, Callable, Dict, List
import warnings
import frozendict
from jax import random, lax
import jax.example_libraries.stax as ostax
from .requirements import Diagonal, get_req, layer, requires
from ..utils.kernel import Kernel
from ..utils.typing import InternalLayer, Layer, LayerKernelFn, NTTree, NTTrees, Shapes
@layer
def serial(*layers: Layer) -> InternalLayer:
  """Compose the given layers one after another.

  Analogous to :obj:`jax.example_libraries.stax.serial`.

  Args:
    *layers:
      layers to chain, each an `(init_fn, apply_fn, kernel_fn)` triple.

  See Also:
    :obj:`~neural_tangents.stax.repeat` for compiled repeated composition.

  Returns:
    A single `(init_fn, apply_fn, kernel_fn)` triple representing the serial
    composition of the given sequence of layers.
  """
  inits, applies, kernels = zip(*layers)
  init_fn, apply_fn = ostax.serial(*zip(inits, applies))

  @requires(**_get_input_req_attr(kernels, fold=op.rshift))
  def kernel_fn(k: NTTree[Kernel], **kwargs) -> NTTree[Kernel]:
    # TODO(xlc): if we drop `x1_is_x2` and use `rng` instead, need split key
    # inside kernel functions here and parallel below.
    out = k
    for f in kernels:
      out = f(out, **kwargs)
    return out

  return init_fn, apply_fn, kernel_fn
@layer
def repeat(layer: Layer, n: int) -> InternalLayer:
  """Compose `layer` in a compiled loop `n` times.
  Equivalent to `serial(*([layer] * n))`, but allows faster compilation time
  for large `n` (but same runtime).
  .. warning::
    `apply_fn` of the `layer` is assumed to keep the activation (`x`) shape
    unchanged.
  .. warning::
    `kernel_fn` of the `layer` is assumed to keep the
    :class:`~neural_tangents.Kernel` metadata unchanged. This is most notably
    not satisfied in :obj:`~neural_tangents.stax.Conv` and other convolutional
    layers which flip the `is_reversed` attribute with each application. A
    workaround is to either use `serial(*([layer] * n))`, or to use
    `repeat(serial(layer, layer), n // 2)` instead of `repeat(layer, n)` for an
    even `n`, i.e. to use two (or, generally, any even number of) convolutions
    per `layer` instead of one (or, generally, any odd number), such that
    `layer` does not alter the `is_reversed` attribute. Similar caution should
    be applied to other :class:`~neural_tangents.Kernel` attributes.
  See Also:
    `RepeatTest` in `tests/stax/combinators_test.py` for examples and
    :obj:`~neural_tangents.stax.serial` for unrolled composition.
  Example:
    >>> from neural_tangents import stax
    >>> #
    >>> layer = stax.serial(stax.Dense(128), stax.Relu())
    >>> depth = 100
    >>> #
    >>> # Unrolled loop:
    >>> nn_unrolled = stax.serial(*([layer] * depth))
    >>> #
    >>> # Compiled loop:
    >>> nn_compiled = stax.repeat(layer, depth)
    >>> # `nn_unrolled` and `nn_compiled` perform the same computation, but
    >>> # `nn_compiled` compiles faster and with smaller memory footprint.
  Args:
    layer:
      layer to be repeated. Outputs must have the same shape and other metadata
      as inputs.
    n:
      number of times to repeat a layer (depth).
  Returns:
    A new layer, meaning an `(init_fn, apply_fn, kernel_fn)` triple,
    representing the repeated composition of `layer` `n` times.
  """
  # NOTE: the `layer` argument shadows the module-level `layer` decorator
  # within this function's body.
  init_fn, apply_fn, kernel_fn = layer
  def init_fn_repeat(rng, input_shape):
    # The layer must be shape-preserving for the `lax.scan` loops below to be
    # well-formed.
    out_shape, _ = init_fn(rng, input_shape)
    if out_shape != input_shape:
      raise ValueError(
          # Fix: backtick/paren placement previously rendered as
          # "`serial(*([layer] * n)`)".
          f'`init_fn` produces a different output shape {out_shape} than the '
          f'input shape {input_shape}. Please use the `serial(*([layer] * n))` '
          f'construction in this setting.'
      )
    def init_fn_scan(rng, params):
      rng, layer_rng = random.split(rng)
      out_shape, params = init_fn(layer_rng, input_shape)
      return rng, params
    # Stack `n` independently-initialized parameter pytrees along a leading
    # axis, threading the RNG key through the scan carry.
    _, params = lax.scan(init_fn_scan, rng, None, n)
    return out_shape, params
  def apply_fn_repeat(params, inputs, **kwargs):
    def apply_fn_scan(x, params):
      return apply_fn(params, x, **kwargs), None
    # Apply the layer `n` times, consuming one parameter slice per step.
    outputs, _ = lax.scan(apply_fn_scan, inputs, params, n)
    return outputs
  @requires(**get_req(kernel_fn))
  def kernel_fn_repeat(k: NTTree[Kernel], **kwargs) -> NTTree[Kernel]:
    if n > 0:
      # One application outside the scan; the remaining `n - 1` run inside.
      k = kernel_fn(k, **kwargs)
      def kernel_fn_scan(k, _):
        k = kernel_fn(k, **kwargs)
        return k, None
      k, _ = lax.scan(kernel_fn_scan, k, None, n - 1)
    return k
  return init_fn_repeat, apply_fn_repeat, kernel_fn_repeat
@layer
def parallel(*layers: Layer) -> InternalLayer:
  """Compose the given layers side-by-side (in parallel).

  The resulting layer is often used together with
  :obj:`~neural_tangents.stax.FanOut`, :obj:`~neural_tangents.stax.FanInSum`,
  and :obj:`~neural_tangents.stax.FanInConcat`. Based on
  :obj:`jax.example_libraries.stax.parallel`.

  Args:
    *layers:
      layers to run in parallel, each an `(init_fn, apply_fn, kernel_fn)`
      triple.

  Returns:
    A single `(init_fn, apply_fn, kernel_fn)` triple. The returned layer takes
    a sequence of inputs and returns a sequence of outputs of the same length
    as `layers`.
  """
  inits, applies, kernels = zip(*layers)
  stax_init, stax_apply = ostax.parallel(*zip(inits, applies))

  def init_fn(rng: random.KeyArray, input_shape: Shapes):
    # Preserve the container type (e.g. list vs tuple) of the inputs.
    return type(input_shape)(stax_init(rng, input_shape))

  def apply_fn(params, inputs, **kwargs):
    return type(inputs)(stax_apply(params, inputs, **kwargs))

  @requires(**_get_input_req_attr(kernels, fold=op.and_))
  def kernel_fn(ks: NTTrees[Kernel], **kwargs) -> NTTrees[Kernel]:
    # Each branch's `kernel_fn` consumes the corresponding input kernel.
    return type(ks)(f(k, **kwargs) for k, f in zip(ks, kernels))

  return init_fn, apply_fn, kernel_fn
# INTERNAL UTILITIES
def _get_input_req_attr(
    kernel_fns: List[LayerKernelFn],
    fold: Callable[[Diagonal, Diagonal], Diagonal]) -> Dict[str, Any]:
  """Gets requirements of the combined layer based on individual requirements.
  Specifically, gets the requirements / allowances to the inputs to a `serial`
  or `parallel` sequence of layers based on requirements of each layer, setting
  requirements / allowances to the most / least demanding among all layers.
  Args:
    kernel_fns:
      list of `kernel_fn`s fed to the `kernel_fns` (e.g. a list of
      convolutional layers and nonlinearities to be chained together with the
      `serial` combinator) or evaluated in parallel (`parallel` combinator).
    fold:
      binary associative operator to combine allowances of consecutive
      individual `kernel_fn`s. Can be only `operator.rshift` (`>>`), i.e.
      composition (corresponding to `serial`) or `operator.and_`, (`&`), i.e.
      `AND` (corresponding to `parallel`).
  Returns:
    A `dict` with combined requirements / allowances.
  """
  req = {}
  for f in kernel_fns:
    req_f = get_req(f, default=frozendict.frozendict())
    for k, v in req_f.items():
      if k == 'use_dropout':
        # `use_dropout` is a whole-network property and must agree across all
        # layers that declare it.
        if k in req and req[k] != v:
          raise ValueError('`use_dropout` is a single whole-network attribute '
                           'and cannot be set to different values.')
        req[k] = v
      elif k in ('batch_axis', 'channel_axis'):
        if k not in req:
          # First layer to declare the axis fixes the requirement.
          req[k] = v
        else:
          if fold is op.and_:
            # Parallel branches must agree on axes; mismatches only warn since
            # a negative and a non-negative index may still denote the same
            # axis (this depends on the input's ndim, unknown here).
            if k in req and req[k] != v:
              if (req[k] >= 0 and v >= 0) or (req[k] < 0 and v < 0):
                warnings.warn(f'For `kernel_fn`, `{k}` parameters must match in'
                              f' all parallel branches, got {req[k]} and {v}. '
                              f'This WILL lead to [silent] errors if '
                              f'`kernel_fn` is called.')
              else:
                warnings.warn(f'Got potentially mismatching `{k}` values in '
                              f'parallel branches: {req[k]} and {v}.')
          elif fold is not op.rshift:
            raise ValueError(fold)
      elif k in ('diagonal_batch', 'diagonal_spatial'):
        # `Diagonal` allowances compose via the provided `fold` operator
        # (`>>` for serial, `&` for parallel).
        if k in req:
          req[k] = fold(req[k], v)
        else:
          req[k] = v
      else:
        raise NotImplementedError(k)
  return req
| 8,959 | 34 | 87 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/stax/requirements.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Requirement management for :obj:`~neural_tangents.stax` layers."""
import enum
from typing import Callable, Optional, Tuple, Union, Sequence, Type
import warnings
import frozendict
import jax
from jax import lax
from jax import numpy as np
from jax import eval_shape
from jax.core import ShapedArray
from jax.tree_util import tree_map, tree_all
from ..utils import utils
import dataclasses
from ..utils import dataclasses as nt_dataclasses
from ..utils.kernel import Kernel
from ..utils.typing import AnalyticKernelFn, Axes, Get, InitFn, ApplyFn, InternalLayer, Layer, LayerKernelFn, NTTree, PyTree
import numpy as onp
# Public decorators
def layer(layer_fn: Callable[..., InternalLayer]) -> Callable[..., Layer]:
  """Convenience decorator applied to all public layers.

  Used in :obj:`~neural_tangents.stax.Relu` etc. Makes the returned
  `kernel_fn` work with both raw :class:`jax.numpy.ndarray` inputs (when the
  layer is the first one applied to inputs) and
  :class:`~neural_tangents.Kernel` inputs (for intermediary layers), and adds
  optional arguments (`get`, `diagonal_batch`, `diagonal_spatial`) for more
  flexible control of the computation and returned results.

  Args:
    layer_fn: layer function returning a triple
      `(init_fn, apply_fn, kernel_fn)`.

  Returns:
    A function with the same signature as `layer_fn`, whose `kernel_fn` is
    preprocessed as described above.
  """
  layer_name = layer_fn.__name__

  @utils.wraps(layer_fn)
  def wrapped(*args, **kwargs):
    init_fn, apply_fn, kernel_fn = layer_fn(*args, **kwargs)
    kernel_fn = _preprocess_kernel_fn(init_fn, apply_fn, kernel_fn)
    # Propagate the public layer name to all three returned functions.
    for f in (init_fn, apply_fn, kernel_fn):
      f.__name__ = layer_name
    return init_fn, apply_fn, kernel_fn

  return wrapped
def requires(**static_reqs):
  """Returns a decorator that augments `kernel_fn` with consistency checks.
  Use this to specify your `kernel_fn` input kernel requirements. The
  decorator-time `static_reqs` are merged with call-time keyword arguments by
  `_fuse_requirements` before being validated against the input `Kernel`.
  See Also:
    :class:`Diagonal`, :class:`Bool`.
  """
  def req(kernel_fn: LayerKernelFn):
    """Returns `kernel_fn` with additional consistency checks."""
    @utils.wraps(kernel_fn)
    def new_kernel_fn(k: NTTree[Kernel], **kwargs) -> NTTree[Kernel]:
      """Executes `kernel_fn` on `kernels` after checking consistency."""
      fused_reqs = _fuse_requirements(static_reqs, {}, **kwargs)
      # `FanInConcat / FanInSum` have no requirements and
      # execute custom consistency checks.
      if isinstance(k, Kernel):
        for key, v in fused_reqs.items():
          if v is not None:  # `None` is treated as explicitly not having a req.
            if key in ('diagonal_batch', 'diagonal_spatial'):
              # A diagonal-only input kernel lacks the off-diagonal entries
              # that a full (`False` / `Bool.NO`) requirement needs.
              if (getattr(k, key) is True and
                  (v is False or
                   (isinstance(v, Diagonal) and v.input == Bool.NO))):
                raise ValueError(f'{kernel_fn} requires `{key} == {v}`, but '
                                 f'input kernel has `{key} == True`, hence '
                                 f'does not contain sufficient information. '
                                 f'Please recompute the input kernel with '
                                 f'`{key} == {v}`.')
            elif key in ('batch_axis', 'channel_axis'):
              # Compare axes after canonicalizing the requirement to a
              # non-negative index using the kernel's input ndim.
              ndim = len(k.shape1)  # pytype: disable=attribute-error  # preserve-union-macros
              v_kernel = getattr(k, key)
              v_pos = v % ndim
              if v_kernel != v_pos:
                raise ValueError(f'{kernel_fn} requires `{key} == {v_pos}`, '
                                 f'but input kernel has `{key} == {v_kernel}`, '
                                 f'making the infinite limit ill-defined.')
            else:
              # Any other name is recognized as a keyword-argument threaded
              # through all `kernel_fn` down to `_inputs_to_kernel` rather than
              # a requirement for this layer.
              pass
      return kernel_fn(k, **kwargs)
    # Record the decorator-time requirements on the wrapper for retrieval via
    # `get_req` by combinators (`serial`, `parallel`, ...).
    _set_req(new_kernel_fn, frozendict.frozendict(static_reqs))
    return new_kernel_fn
  return req
def supports_masking(remask_kernel: bool):
  """Returns a decorator that turns layers into layers supporting masking.
  Specifically:
  1. `init_fn` is left unchanged.
  2. `apply_fn` is turned from a function that accepts a `mask=None` keyword
  argument (which indicates `inputs[mask]` must be masked), into a function
  that accepts a `mask_constant=None` keyword argument (which indicates
  `inputs[inputs == mask_constant]` must be masked).
  3. `kernel_fn` is modified to
  3.a. propagate the `kernel.mask1` and `kernel.mask2` through intermediary
  layers, and,
  3.b. if `remask_kernel == True`, zeroes-out covariances between entries of
  which at least one is masked.
  4. If the decorated layers has a `mask_fn`, it is used to propagate masks
  forward through the layer, in both `apply_fn` and `kernel_fn`. If not, it is
  assumed the mask remains unchanged.
  Must be applied before the `layer` decorator.
  See Also:
    Example of masking application in `examples/imdb.py`.
  Args:
    remask_kernel:
      `True` to zero-out kernel covariance entries between masked inputs after
      applying `kernel_fn`. Some layers don't need this and setting
      `remask_kernel=False` can save compute.
  Returns:
    A decorator that turns functions returning
      `(init_fn, apply_fn, kernel_fn[, mask_fn])`
    into functions returning
      `(init_fn, apply_fn_with_masking, kernel_fn_with_masking)`.
  """
  # NOTE: the inner function deliberately reuses the name `supports_masking`,
  # and its `layer` parameter shadows the module-level `layer` decorator.
  def supports_masking(layer):
    @utils.wraps(layer)
    def layer_with_masking(*args, **kwargs) -> InternalLayer:
      layer_fns = layer(*args, **kwargs)
      init_fn, apply_fn, kernel_fn = layer_fns[:3]
      if len(layer_fns) == 3:
        # No mask propagation function supplied - use identity.
        _mask_fn = lambda mask, input_shape: mask
      elif len(layer_fns) == 4:
        # Custom mask propagation function supplied.
        _mask_fn = layer_fns[3]
      else:
        raise ValueError(f'Expected 3 (`init_fn`, `apply_fn`, `kernel_fn`) or 4'
                         f' (..., `mask_fn`) layer functions, '
                         f'got {len(layer_fns)}.')
      @utils.wraps(_mask_fn)
      def mask_fn(mask, input_shape):
        # `None` mask (nothing masked) propagates as `None`.
        if mask is None:
          return None
        return _mask_fn(mask, input_shape)
      def apply_fn_with_masking(params, inputs, *,
                                mask_constant=None, **kwargs):
        # Split every input leaf into a (values, boolean mask) pair.
        masked_inputs = tree_map(
            lambda x: _get_masked_array(x, mask_constant),
            inputs,
            is_leaf=lambda x: isinstance(x, (np.ndarray, MaskedArray)))
        is_leaf = lambda x: isinstance(x, MaskedArray)
        inputs = tree_map(
            lambda x: x.masked_value,
            masked_inputs,
            is_leaf=is_leaf)
        mask = tree_map(
            lambda x: x.mask,
            masked_inputs,
            is_leaf=is_leaf)
        outputs = apply_fn(params, inputs, mask=mask, **kwargs)
        # Propagate the input mask forward through this layer.
        outputs_mask = mask_fn(mask,
                               inputs.shape if isinstance(inputs, np.ndarray)
                               else [i.shape for i in inputs])
        if outputs_mask is None:
          return outputs
        return MaskedArray(outputs, outputs_mask)  # pytype:disable=wrong-arg-count
      def kernel_fn_with_masking(k: NTTree[Kernel], **user_reqs):
        is_leaf = lambda k: isinstance(k, Kernel)
        mask1 = tree_map(lambda k: k.mask1, k, is_leaf=is_leaf)
        shape1 = tree_map(lambda k: k.shape1, k, is_leaf=is_leaf)
        mask2 = tree_map(lambda k: k.mask2, k, is_leaf=is_leaf)
        shape2 = tree_map(lambda k: k.shape2, k, is_leaf=is_leaf)
        # Propagate both masks forward through this layer before remasking.
        mask1, mask2 = mask_fn(mask1, shape1), mask_fn(mask2, shape2)
        k = kernel_fn(k, **user_reqs)  # type: Kernel
        # Either zero-out covariances between masked entries, or just carry
        # the propagated masks along unchanged.
        if remask_kernel:
          remask_fn = lambda k, m1, m2: k.mask(m1, m2)
        else:
          remask_fn = lambda k, m1, m2: k.replace(mask1=m1, mask2=m2)
        k = tree_map(remask_fn, k, mask1, mask2, is_leaf=is_leaf)
        return k
      # Preserve the wrapped `kernel_fn`'s input requirements, if any.
      if _has_req(kernel_fn):
        _set_req(kernel_fn_with_masking, get_req(kernel_fn))
      return init_fn, apply_fn_with_masking, kernel_fn_with_masking
    return layer_with_masking
  return supports_masking
def unmask_fn(fn: ApplyFn) -> ApplyFn:
  """Make a function returning a `MaskedArray` return a `np.ndarray`.

  Useful when `fn` (e.g. an `apply_fn` called with `masked_constant` to allow
  variable-length inputs) returns a :class:`MaskedArray`. The wrapper replaces
  every :class:`MaskedArray` leaf of the output with a plain array in which
  masked entries are zeroed-out — convenient for functions operating on
  arrays, such as :obj:`~neural_tangents.monte_carlo_kernel_fn` or
  :obj:`~neural_tangents.empirical_kernel_fn`.

  .. warning::
    In some cases you may want custom unmasking behavior instead, e.g. one
    that normalizes values by the number of non-masked entries.

  See Also:
    :class:`MaskedArray`, and an example masking application in
    `examples/imdb.py`.

  Args:
    fn: function returning a :class:`MaskedArray`.

  Returns:
    Function of the same signature as `fn`, where any output
    :class:`MaskedArray` is replaced with the :class:`jax.numpy.ndarray` with
    masked entries zeroed-out.
  """
  def _to_array(x: Union[MaskedArray, np.ndarray]) -> np.ndarray:
    # Non-masked leaves pass through unchanged.
    if isinstance(x, MaskedArray):
      return utils.mask(x.masked_value, x.mask)  # pytype: disable=bad-return-type  # jax-ndarray
    return x  # pytype: disable=bad-return-type  # jax-ndarray

  @utils.wraps(fn)
  def fn_no_mask(*args, **kwargs):
    out = fn(*args, **kwargs)
    return tree_map(
        _to_array,
        out,
        is_leaf=lambda x: isinstance(x, (np.ndarray, MaskedArray)))

  return fn_no_mask
# INTERNAL UTILITIES
@nt_dataclasses.dataclass
class MaskedArray:
  """A dataclass representing a masked :class:`jax.numpy.ndarray` or a `PyTree`.
  This type may be returned by an `apply_fn` if you provide the
  `masked_constant` argument, i.e. indicate that values of `x` equal to
  `masked_constant` are considered as masked. In this case the output of the
  `apply_fn` will be a :class:`MaskedArray`, containing information about which
  output entries are considered masked.
  See Also:
    :obj:`unmask_fn`, and an example masking application in `examples/imdb.py`.
  Attributes:
    masked_value:
      :class:`jax.numpy.ndarray` or a `PyTree` with values.
    mask:
      a boolean :class:`jax.numpy.ndarray` or a `PyTree` with `True` indicating
      that the respective entry in `masked_value` is considered masked.
  """
  # Values pytree; entries at positions where `mask` is `True` are masked.
  masked_value: PyTree
  # Boolean pytree matching `masked_value`; `True` marks masked entries.
  mask: PyTree
def _get_masked_array(
    x: Union[None, np.ndarray, ShapedArray, MaskedArray],
    mask_constant: Optional[float] = None
) -> MaskedArray:
  """Return `x` with entries equal to `mask_constant` zeroed-out, and the mask.
  The mask returned is a boolean `np.ndarray` with masked indices having `True`.
  Args:
    x:
      `np.ndarray` to mask. If `x` is a :class:`MaskedArray`, treat it as
      `(masked_x, mask)` and pass it through.
    mask_constant: an optional `float`, the value in inputs to be considered as
      masked (e.g. padding in a batch of sentences). `None` means no masking.
      Can also be `np.nan`, `np.inf` etc.
  Returns:
    A :class:`MaskedArray` of `(masked_x, boolean_mask)`.
  """
  if x is None:
    mask_mat = None
  elif isinstance(x, MaskedArray):
    # Already split into (values, mask) - pass through unchanged.
    x, mask_mat = x.masked_value, x.mask
  elif isinstance(x, (onp.ndarray, np.ndarray, float, int)):
    if mask_constant is None:
      mask_mat = None
    else:
      # `NaN != NaN`, so an equality test cannot detect a NaN mask constant;
      # dispatch to `np.isnan` in that case. `lax.cond` keeps the branch
      # selection traceable by JAX.
      mask_mat = lax.cond(np.isnan(mask_constant),
                          np.isnan,
                          lambda x: x == mask_constant,
                          x)
  else:
    raise TypeError(x, type(x))
  # NOTE(review): `utils.mask` presumably zeroes-out entries where `mask_mat`
  # is `True` (and is a no-op for a `None` mask) — confirm in `utils`.
  x = utils.mask(x, mask_mat)
  return MaskedArray(x, mask_mat)  # pytype: disable=wrong-arg-count
# Attribute name under which input requirements are stored on a `kernel_fn`.
_INPUT_REQ = 'input_req'

def get_req(
    f: Callable,
    default: Optional[frozendict.frozendict] = None) -> frozendict.frozendict:
  """Return the input requirements attached to `f`, or `default` if absent."""
  try:
    return getattr(f, _INPUT_REQ)
  except AttributeError:
    return default

def _set_req(f: Callable, req: frozendict.frozendict):
  """Attach the input requirements `req` to the function `f` in-place."""
  setattr(f, _INPUT_REQ, req)

def _has_req(f: Callable) -> bool:
  """Return whether `f` carries input requirements."""
  return hasattr(f, _INPUT_REQ)
# Default input requirements. Keys largely mirror the keyword arguments of
# `_inputs_to_kernel` (plus `use_dropout`); presumably used when a layer's
# `kernel_fn` has no `@requires`-declared requirements — confirm at call site.
_DEFAULT_INPUT_REQ = frozendict.frozendict(
    {
        'diagonal_batch': True,
        'diagonal_spatial': False,
        'batch_axis': 0,
        'use_dropout': False,
        'channel_axis': -1,
        'mask_constant': None
    }
)
class Bool(enum.IntEnum):
  """Trinary logic helper. See :class:`Diagonal` for details.

  Attributes:
    NO:
      `False`.
    MAYBE:
      Maybe.
    YES:
      `True`.
  """
  NO = 0
  MAYBE = 1
  YES = 2

  def __and__(self, other: 'Bool') -> 'Bool':
    # Trinary conjunction: the result is the weaker (smaller) of the two.
    return self if self <= other else other

  __rand__ = __and__
@dataclasses.dataclass(frozen=True)
class Diagonal:
  """Helps decide whether to allow the kernel to contain diagonal entries only.
  The intended behavior is to be diagonal-only iff
  a) output off-diagonal entries are all zeros, and
  b) diagonal-only :class:`~neural_tangents.Kernel` is sufficient for all
  steps of computation.
  Note that currently this parameter is shared between all parallel branches,
  even if this is excessive, and it is defined once for the whole network and
  does not change from layer to layer, even if it could be possible.
  Must be endowed with
  1) A commutative, associative, idempotent `AND` (`&`) operation,
  corresponding to combining requirements of two layers in parallel.
  2) An associative composition `>>` operation, corresponding to the
  requirement of a composition of two layers.
  Attributes:
    input:
      specifies whether inputs to given layer can contain only diagonal
      entries. :attr:`Bool.YES` means "yes"; :attr:`Bool.MAYBE` means iff
      off-diagonal entries are zero. :attr:`Bool.NO` means "no". When
      traversing the network tree from inputs to outputs (as well as parallel
      branches from left/right to right/left) can only decrease.
    output:
      specifies whether any outputs (starting from this layer to the output of
      the network) can contain only diagonal entries. :attr:`Bool.YES` means
      yes; :attr:`Bool.MAYBE` means "yes" after current layer, but may become
      "no" further in the network. :attr:`Bool.NO` means "no".
  """
  # Whether the incoming kernel may contain only diagonal entries.
  input: Bool = Bool.YES
  # Whether outputs from this layer onward may be diagonal-only.
  output: Bool = Bool.NO
  def __rshift__(self, other: 'Diagonal') -> 'Diagonal':
    """Associative composition (`self >> other`) operation.
    Args:
      other:
        rhs of `>>`, i.e. the requirement of the layer applied after `self`.
    Returns:
      The requirement satisfied by composition `other(self(.))`.
    """
    if self.output == Bool.YES:
      # Output is guaranteed diagonal-only from here on; nothing downstream
      # can change that, so the requirement is unchanged.
      return self
    if self.output > Bool.NO and other.input > Bool.NO:
      input = self.input
    elif self.output == Bool.NO and other.input < Bool.YES:
      input = Bool.NO
    else:
      # Otherwise take the more demanding (smaller) of the two allowances.
      input = min(self.input, other.input)
    return Diagonal(input=input, output=other.output)
  def __and__(self, other: 'Diagonal') -> 'Diagonal':
    """Commutative, associative, and idempotent `AND` operation.
    Args:
      other:
        lhs/rhs.
    Returns:
      The largest value allowed by both `self` and `other`.
    """
    return Diagonal(input=self.input & other.input,
                    output=self.output & other.output)
  def __bool__(self) -> bool:
    """Convert to `diagonal_spatial` / `diagonal_batch` `Kernel` attribute."""
    return self.input == Bool.YES and self.output > Bool.NO
  def __lshift__(self, other: 'Diagonal') -> 'Diagonal':
    """Associative composition (`self << other`) operation.
    Args:
      other:
        rhs of `<<`, i.e. the requirement of the layer applied before `self`.
    Returns:
      The value allowed by composition `self(other(.))`.
    """
    return other >> self
  __rand__ = __and__
def _cov_diag_batch_diag_spatial(x: np.ndarray,
batch_axis: int,
channel_axis: int) -> np.ndarray:
ret = np.sum(x ** 2, axis=channel_axis)
new_batch_axis = batch_axis - (1 if batch_axis > channel_axis else 0)
ret = np.moveaxis(ret, new_batch_axis, 0)
return ret
def _cov_diag_batch_full_spatial(x: np.ndarray,
                                 batch_axis: int,
                                 channel_axis: int) -> np.ndarray:
  """Per-sample full spatial covariance, contracting over channels.

  The batch axis is treated as a batch dimension of the contraction, so only
  the sample-diagonal is formed; the two copies of the spatial axes are then
  interleaved by `utils.zip_axes` (skipping the leading batch axis).
  """
  dims = (((channel_axis,), (channel_axis,)),
          ((batch_axis,), (batch_axis,)))
  prod = lax.dot_general(x, x, dims)
  return utils.zip_axes(prod, 1)
def _cov_full_batch_full_spatial(x1: np.ndarray,
                                 x2: np.ndarray,
                                 batch_axis: int,
                                 channel_axis: int) -> np.ndarray:
  """Full sample-sample, location-location covariance over channels."""
  prod = np.tensordot(x1, x2, (channel_axis, channel_axis))
  # After the contraction removes the channel axis, the batch axis shifts
  # down by one if it followed the channel axis.
  b = batch_axis - (1 if batch_axis > channel_axis else 0)
  # Bring both batch axes (one from `x1`, one from `x2`) to the front, then
  # interleave the two inputs' remaining (spatial) axes.
  prod = np.moveaxis(prod, (b, x1.ndim - 1 + b), (0, 1))
  return utils.zip_axes(prod, 2)
def _cov_full_batch_diag_spatial(x1: np.ndarray,
x2: np.ndarray,
batch_axis: int,
channel_axis: int) -> np.ndarray:
diag_axes = tuple(i for i in range(x1.ndim)
if i != batch_axis and i != channel_axis)
ret = lax.dot_general(x1, x2,
(((channel_axis,), (channel_axis,)),
(diag_axes, diag_axes))
)
ret = np.moveaxis(ret, (-2, -1), (0, 1))
return ret
def _cov_diag_batch(x: np.ndarray,
                    diagonal_spatial: bool,
                    batch_axis: int,
                    channel_axis: int) -> np.ndarray:
  """Sample-diagonal covariance of `x`, normalized by the channel count."""
  contract = (_cov_diag_batch_diag_spatial if diagonal_spatial
              else _cov_diag_batch_full_spatial)
  return contract(x, batch_axis, channel_axis) / x.shape[channel_axis]
def _cov(
    x1: np.ndarray,
    x2: Optional[np.ndarray],
    diagonal_spatial: bool,
    batch_axis: int,
    channel_axis: int) -> Optional[np.ndarray]:
  """Computes uncentered covariance (nngp) between two batches of inputs.

  Args:
    x1:
      a (2+S)D (S >= 0) `np.ndarray` of shape
      `(batch_size_1, <S spatial dimensions>, n_channels)`. `batch_size_1` and
      `n_channels` may be in different positions based on `batch_axis` and
      `channel_axis`.
    x2:
      an optional `np.ndarray` with the same shape as `x1` apart from a
      possibly different batch (`batch_size_2`) dimension. `None` means
      `x2 == x1`.
    diagonal_spatial:
      `True` to compute only the diagonals of the location-location
      covariances
      (`nngp.shape == (batch_size_1, batch_size_2, height, width, ...)`),
      `False` for the full covariance
      (`nngp.shape == (batch_size_1, batch_size_2, height, height,
      width, width, ...)`).
    batch_axis:
      Specifies which axis is the batch axis.
    channel_axis:
      Specifies which axis is the channel / feature axis (averaged over). For
      `kernel_fn`, channel size is considered to be infinite.

  Returns:
    Matrix of uncentred batch covariances with shape
    `(batch_size_1, batch_size_2, <S spatial dimensions>)`
    if `diagonal_spatial` is `True`, or
    `(batch_size_1, batch_size_2, <2*S spatial dimensions>)`
    if `diagonal_spatial` is `False`.
  """
  if x2 is None:
    x2 = x1

  contract = (_cov_full_batch_diag_spatial if diagonal_spatial
              else _cov_full_batch_full_spatial)
  return contract(x1, x2, batch_axis, channel_axis) / x1.shape[channel_axis]
def _inputs_to_kernel(
    x1: np.ndarray,
    x2: Optional[np.ndarray],
    *,
    diagonal_batch: bool,
    diagonal_spatial: Union[bool, Diagonal],
    compute_ntk: bool,
    batch_axis: int,
    channel_axis: Optional[int],
    mask_constant: Optional[float],
    eps: float = 1e-12,
    **kwargs
) -> Kernel:
  """Transforms (batches of) inputs to a `Kernel`.
  This is a private function. Docstring and example are for internal reference.
  The kernel contains the empirical covariances between different inputs and
  their entries (e.g. pixels, words, entries in a time series etc.) necessary
  to compute the covariance of the Gaussian Process corresponding to an
  infinite Bayesian or continuous gradient descent trained neural network.
  The smallest necessary number of covariance entries is tracked. For example,
  all networks are assumed to have i.i.d. weights along the channel / feature
  / logits dimensions, hence covariance between different entries along these
  dimensions is known to be 0 and is not tracked.
  Example:
    >>> x = np.ones((10, 32, 16, 3))
    >>> o = _inputs_to_kernel(x, None,
    >>>                       diagonal_batch=True,
    >>>                       diagonal_spatial=False,
    >>>                       compute_ntk=True,
    >>>                       batch_axis=0,
    >>>                       channel_axis=-1)
    >>> o.cov1.shape, o.ntk.shape
    (10, 32, 32, 16, 16), (10, 10, 32, 32, 16, 16)
    >>> o = _inputs_to_kernel(x, None,
    >>>                       diagonal_batch=True,
    >>>                       diagonal_spatial=True,
    >>>                       compute_ntk=True,
    >>>                       batch_axis=0,
    >>>                       channel_axis=-1)
    >>> o.cov1.shape, o.ntk.shape
    (10, 32, 16), (10, 10, 32, 16)
    >>> x1 = np.ones((10, 128))
    >>> x2 = np.ones((20, 128))
    >>> o = _inputs_to_kernel(x1, x2,
    >>>                       diagonal_batch=True,
    >>>                       diagonal_spatial=True,
    >>>                       compute_ntk=False,
    >>>                       batch_axis=0,
    >>>                       channel_axis=-1)
    >>> o.cov1.shape, o.nngp.shape
    (10,), (10, 20)
  Args:
    x1:
      an `(S+2)`-dimensional `np.ndarray` of shape
      `(batch_size_1, height, width, depth, ..., n_channels)` with `S` spatial
      dimensions (`S >= 0`). Dimensions may be in different order based on
      `batch_axis` and `channel_axis`.
    x2:
      an optional `np.ndarray` with the same shape as `x1` apart from possibly
      different batch size. `None` means `x2 == x1`.
    diagonal_batch:
      Specifies whether `cov1` and `cov2` store only
      the diagonal of the sample-sample covariance
      (`diagonal_batch == True`,
      `cov1.shape == (batch_size_1, ...)`),
      or the full covariance
      (`diagonal_batch == False`,
      `cov1.shape == (batch_size_1, batch_size_1, ...)`).
    diagonal_spatial:
      Specifies whether all (`cov1`, `ntk`, etc.) input covariance matrcies
      should store only the diagonals of the location-location covariances
      (`diagonal_spatial == True`,
      `nngp.shape == (batch_size_1, batch_size_2, height, width, depth, ...)`),
      or the full covariance
      (`diagonal_spatial == False`,
      `nngp.shape == (batch_size_1, batch_size_2, height, height,
      width, width, depth, depth, ...)`).
    compute_ntk:
      `True` to compute both NTK and NNGP kernels, `False` to only compute NNGP.
    batch_axis:
      Specifies which axis is the batch axis.
    channel_axis:
      Specifies which axis is the channel / feature axis. For `kernel_fn`,
      channel size is considered to be infinite.
    mask_constant:
      an optional `float`, the value in inputs to be considered as masked (e.g.
      padding in a batch of sentences). `None` means no masking. Can also be
      `np.nan`, `np.inf` etc. Beware of floating point precision errors and try
      to use an atypical for inputs value.
    eps:
      a small number used to check whether x1 and x2 are the same up to `eps`.
    **kwargs:
      other arguments passed to all intermediary `kernel_fn` calls (not used
      here).
  Returns:
    The :class:`~neural_tangents.Kernel` object containing inputs covariance[s].
  """
  if not (isinstance(x1, (onp.ndarray, np.ndarray)) and
          (x2 is None or isinstance(x2, (onp.ndarray, np.ndarray)))):
    raise TypeError(('Wrong input types given. Found `x1` of type '
                     f'{type(x1)} and `x2` of type {type(x2)}, need both to be'
                     f'`np.ndarray`s (`x2` can be `None`).'))
  # Canonicalize the batch axis to a non-negative index.
  batch_axis %= x1.ndim
  diagonal_spatial = bool(diagonal_spatial)
  if batch_axis != 0:
    # TODO(romann): add support or clear error for batching.
    warnings.warn(f'!!! Non-leading (!= 0) batch dimension in the '
                  f'input layer is not supported for batching '
                  f'kernels, got batch_axis = {batch_axis}. !!!')
  if channel_axis is None:
    # `channel_axis=None`: move the batch axis to the front and flatten all
    # remaining axes into a single channel / feature axis.
    def flatten(x):
      if x is None:
        return x
      return np.moveaxis(x, batch_axis, 0).reshape((x.shape[batch_axis], -1))
    x1, x2 = flatten(x1), flatten(x2)
    batch_axis, channel_axis = 0, 1
    diagonal_spatial = False
  else:
    channel_axis %= x1.ndim
  # Returns the (masked) input, its self-covariance, and its boolean mask.
  def get_x_cov_mask(x):
    if x is None:
      return None, None, None
    if x.ndim < 2:
      raise ValueError(f'Inputs must be at least 2D (a batch dimension and a '
                       f'channel/feature dimension), got {x.ndim}.')
    x = _get_masked_array(x, mask_constant)
    x, mask = x.masked_value, x.mask
    # TODO(schsam): Think more about dtype automatic vs manual dtype promotion.
    x = x.astype(jax.dtypes.canonicalize_dtype(np.float64))
    if diagonal_batch:
      cov = _cov_diag_batch(x, diagonal_spatial, batch_axis, channel_axis)
    else:
      cov = _cov(x, x, diagonal_spatial, batch_axis, channel_axis)
    return x, cov, mask
  x1, cov1, mask1 = get_x_cov_mask(x1)
  x2, cov2, mask2 = get_x_cov_mask(x2)
  nngp = _cov(x1, x2, diagonal_spatial, batch_axis, channel_axis)
  # NTK starts as a scalar zero placeholder (to be broadcast by later layers),
  # or `None` when the NTK is not requested.
  ntk = np.zeros((), nngp.dtype) if compute_ntk else None  # pytype: disable=attribute-error  # always-use-return-annotations
  is_gaussian = False
  is_reversed = False
  x1_is_x2 = utils.x1_is_x2(x1, x2, eps=eps)
  is_input = False
  return Kernel(cov1=cov1,
                cov2=cov2,
                nngp=nngp,
                ntk=ntk,
                x1_is_x2=x1_is_x2,
                is_gaussian=is_gaussian,
                is_reversed=is_reversed,
                is_input=is_input,
                diagonal_batch=diagonal_batch,
                diagonal_spatial=diagonal_spatial,
                shape1=x1.shape,
                shape2=x1.shape if x2 is None else x2.shape,
                batch_axis=batch_axis,
                channel_axis=channel_axis,
                mask1=mask1,
                mask2=mask2)  # pytype:disable=wrong-keyword-args
def _propagate_shape(init_fn: InitFn,
                     apply_fn: ApplyFn,
                     shaped: ShapedArray,
                     **kwargs) -> ShapedArray:
  """Infer a layer's output shape by abstractly evaluating init/apply.

  Runs `init_fn` and `apply_fn` on abstract values only (via
  `jax.eval_shape`), so no actual computation or allocation takes place.
  """
  def _init_then_apply(rng, x):
    _, params = init_fn(rng, tree_map(lambda v: v.shape, x))
    return apply_fn(params, x, rng=rng, **kwargs)

  abstract_key = ShapedArray((2,), np.uint32)
  try:
    shaped = eval_shape(_init_then_apply, abstract_key, shaped)
  except NotImplementedError:
    # Layers without an `apply_fn` implementation are treated as
    # shape-preserving: keep the incoming shape unchanged.
    pass

  # Unwrap a masked output down to its underlying abstract value.
  return shaped.masked_value if isinstance(shaped, MaskedArray) else shaped
def _set_shapes(init_fn: InitFn,
                apply_fn: ApplyFn,
                in_kernel: NTTree[Kernel],
                out_kernel: NTTree[Kernel],
                **kwargs
                ) -> NTTree[Kernel]:
  """Propagate `shape1` / `shape2` metadata from `in_kernel` to `out_kernel`."""
  def _is_kernel(k):
    return isinstance(k, Kernel)

  def _abstract(get_shape):
    # Turn each kernel's recorded input shape into an abstract array.
    return tree_map(lambda k: ShapedArray(get_shape(k), k.nngp.dtype),
                    in_kernel, is_leaf=_is_kernel)

  shape1 = _abstract(lambda k: k.shape1)
  shape2 = _abstract(lambda k: k.shape2)

  # Split keyword arguments into per-input groups (e.g. `pattern`).
  kwargs1, kwargs2 = utils.split_kwargs(kwargs)

  unmasked_apply = unmask_fn(apply_fn)
  shape1 = _propagate_shape(init_fn, unmasked_apply, shape1, **kwargs1)
  shape2 = _propagate_shape(init_fn, unmasked_apply, shape2, **kwargs2)

  return tree_map(
      lambda k, s1, s2: k.replace(shape1=s1.shape, shape2=s2.shape),
      out_kernel, shape1, shape2, is_leaf=_is_kernel)
def _fuse_requirements(
    kernel_fn_reqs,
    default_reqs,
    **user_reqs
) -> frozendict.frozendict:
  """Merge layer-declared, user-specified, and default requirements.

  User-specified requirements override static ones only when they are not
  more demanding; otherwise a `ValueError` is raised. Any requirement left
  unspecified falls back to `default_reqs`.
  """
  fused = dict(kernel_fn_reqs)

  for name, requested in user_reqs.items():
    if requested is None:
      continue

    if name in fused:
      existing = fused[name]
      # A user asking for `True` conflicts with a kernel that statically
      # requires `False` (or a `Diagonal` whose input is `Bool.NO`).
      incompatible = (existing is False or
                      (isinstance(existing, Diagonal) and
                       existing.input == Bool.NO))
      if requested is True and incompatible:
        raise ValueError(f'Asked to compute `kernel_fn` output with '
                         f'`{name} == {requested}`, while `kernel_fn` '
                         f'requires `{name} == {existing}`.')

    fused[name] = requested

  # Fill unspecified requirements with defaults.
  for name, default in default_reqs.items():
    fused.setdefault(name, default)

  return frozendict.frozendict(fused)
def _preprocess_kernel_fn(
    init_fn: InitFn,
    apply_fn: ApplyFn,
    kernel_fn: LayerKernelFn
) -> AnalyticKernelFn:
  """Returns a `kernel_fn` with additional arguments.
  Args:
    init_fn: layer parameters initialization function. Used for shape inference.
    apply_fn: layer forward-prop function. Used for shape inference.
    kernel_fn: the `Kernel` -> `Kernel` layer propagation function.
  Returns:
    A new `kernel_fn` that does the same computation but accepts additional
    arguments to flexibly specify the required computation, and can be applied
    to either a `Kernel` or a pair of `np.ndarray`s.
  """
  # Set empty requirements if none specified.
  if not _has_req(kernel_fn):
    kernel_fn = requires()(kernel_fn)
  # Path taken when the caller already passes a `Kernel` object: propagate it
  # through `kernel_fn` and attach updated shape metadata.
  def kernel_fn_kernel(kernel, **kwargs):
    out_kernel = kernel_fn(kernel, **kwargs)
    return _set_shapes(init_fn, apply_fn, kernel, out_kernel, **kwargs)
  # Path taken when the caller passes raw arrays: first convert `(x1, x2)`
  # into an input `Kernel` honoring the fused requirements, then propagate.
  def kernel_fn_x1(x1, x2, get, **kwargs):
    # Get input requirements requested by network layers, user, or defaults.
    kernel_fn_reqs = get_req(kernel_fn)
    reqs = _fuse_requirements(kernel_fn_reqs, _DEFAULT_INPUT_REQ, **kwargs)
    # The NTK is computed unless the user explicitly asked only for other
    # quantities via `get`.
    compute_ntk = (get is None) or ('ntk' in get)
    if x2 is None:
      # `x2 is None` means "use `x1` for both inputs"; mirror its tree
      # structure with `None` leaves so `tree_map` below can pair them up.
      x2 = tree_map(lambda x: None, x1)
    def input_fn(x1, x2):
      return _inputs_to_kernel(x1, x2, compute_ntk=compute_ntk, **reqs)
    kernel = tree_map(input_fn, x1, x2)
    out_kernel = kernel_fn(kernel, **kwargs)
    return _set_shapes(init_fn, apply_fn, kernel, out_kernel, **kwargs)
  # Public entry point: dispatches between the two paths above and lets
  # `get_namedtuple` slice the output according to `get`.
  @utils.get_namedtuple('AnalyticKernel')
  def kernel_fn_any(x1_or_kernel: Union[NTTree[np.ndarray], NTTree[Kernel]],
                    x2: Optional[NTTree[np.ndarray]] = None,
                    get: Optional[Get] = None,
                    *,
                    pattern: Optional[Tuple[Optional[np.ndarray],
                                            Optional[np.ndarray]]] = None,
                    mask_constant: Optional[float] = None,
                    diagonal_batch: Optional[bool] = None,
                    diagonal_spatial: Optional[bool] = None,
                    **kwargs):
    """Returns the `Kernel` resulting from applying `kernel_fn` to given inputs.
    Args:
      x1_or_kernel:
        either an NTTree of the first batch of inputs.
      x2:
        an optional NTTree of `np.ndarray` with the second batch of inputs.
        `None` means `x2 == x1` or `x1_or_kernel is Kernel`.
      get:
        either `None`, a string, or a tuple of strings specifying which data
        should be returned by the kernel function. Can be "nngp", "ntk", "cov1",
        "cov2", "is_gaussian", "is_reversed", "diagonal_batch",
        "diagonal_spatial", etc.
      pattern:
        either `None` or a tuple of two `np.ndarray`. The
        `pattern = (pattern1, pattern2)` is used to specify how the nodes in a
        graphical network is aggregated.
      mask_constant:
        an optional `float`, the value in inputs to be considered
        as masked (e.g. padding in a batch of sentences). `None` means no
        masking. Can also be `np.nan`, `np.inf` etc. Beware of floating point
        precision errors and try to use an atypical for inputs value.
      diagonal_batch:
        an optional boolean specifying whether `cov1` and `cov2` in all
        intermediary layers should store only the diagonal of the
        sample-sample covariance
        (`diagonal_batch == True`,
         `cov1.shape == (batch_size_1, ...)`),
        or the full covariance
        (`diagonal_batch == False`,
         `cov1.shape == (batch_size_1, batch_size_1, ...)`).
        Defaults to least compute-heavy setting necessary to compute the output
        `nngp` [and `ntk`] covariance.
      diagonal_spatial:
        an optional boolean specifying whether all (`cov1`, `ntk`, etc.)
        covariance matrices in all intermediary layers should store only the
        diagonals of the location-location covariances
        (`diagonal_spatial == True`,
         `nngp.shape == (batch_size_1, batch_size_2, height, width, ...)`),
        or the full covariance
        (`diagonal_spatial == False`,
         `nngp.shape == (batch_size_1, batch_size_2, height, height,
                         width, width, ...)`).
        Defaults to least compute-heavy setting necessary to compute the output
        `nngp` [and `ntk`] covariance.
      **kwargs:
        other arguments passed to all intermediary `kernel_fn` calls.
    Returns:
      If `get` is a string, returns the requested `np.ndarray`. If `get` is a
      tuple, returns an `AnalyticKernel` namedtuple containing only the
      requested information. If `get` is `None` then a `Kernel` object is
      returned containing all the data.
    """
    # `True` iff every leaf of `x` is an instance of `cls` (leaves being
    # kernels or arrays).
    def all_of(x, cls: Type) -> bool:
      def is_leaf(x) -> bool:
        return isinstance(x, (Kernel, np.ndarray, onp.ndarray))
      return tree_all(
          tree_map(
              lambda x: isinstance(x, cls),
              x,
              is_leaf=is_leaf)
      )
    if all_of(x1_or_kernel, Kernel) and x2 is None:
      return kernel_fn_kernel(x1_or_kernel,
                              pattern=pattern,
                              diagonal_batch=diagonal_batch,
                              diagonal_spatial=diagonal_spatial,
                              **kwargs)
    return kernel_fn_x1(x1_or_kernel, x2, get,
                        pattern=pattern,
                        diagonal_batch=diagonal_batch,
                        diagonal_spatial=diagonal_spatial,
                        mask_constant=mask_constant,
                        **kwargs)
  # Carry the (possibly defaulted) requirements over to the wrapper so outer
  # layers / `serial` can inspect them.
  _set_req(kernel_fn_any, get_req(kernel_fn))
  return kernel_fn_any
def get_diagonal(
    cov: Optional[np.ndarray],
    diagonal_batch: bool,
    diagonal_spatial: bool
) -> Optional[np.ndarray]:
  """Extracts the diagonal of `cov` over all (sample, spatial) dimensions.

  Adapts computation if `cov` already stores only the diagonal along some
  dimensions based on `diagonal_batch` and `diagonal_spatial`.
  """
  if cov is None:
    return None

  # One leading sample axis if the batch diagonal was already taken,
  # two otherwise.
  n_batch_axes = 1 if diagonal_batch else 2
  first = 2 - n_batch_axes
  last = n_batch_axes if diagonal_spatial else cov.ndim

  unzipped = utils.unzip_axes(cov, first, last)
  return utils.diagonal_between(unzipped, first, last)
def get_diagonal_outer_prods(
    cov1: np.ndarray,
    cov2: Optional[np.ndarray],
    diagonal_batch: bool,
    diagonal_spatial: bool,
    operation: Callable[[float, float], float],
    axis: Sequence[int] = (),
    mask1: Optional[np.ndarray] = None,
    mask2: Optional[np.ndarray] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
  """Gets outer products of diagonals `cov1, cov1`, `cov1, cov2`, `cov2, cov2`.

  `prod11[x1, x2, h1, h2, ...]` =
   cov1[x1, [x1,], h1, [h1,], ...] * cov1[x2, [x2,], h2, [h2,], ...]`,
  `prod12[x1, x2, h1, h2, ...]` =
   cov1[x1, [x1,], h1, [h1,], ...] * cov2[x2, [x2,], h2, [h2,], ...]`,
  `prod22[x1, x2, h1, h2, ...]` =
   cov2[x1, [x1,], h1, [h1,], ...] * cov2[x2, [x2,], h2, [h2,], ...]`.

  Exact shapes of `cov1` and `cov2` are defined by `diagonal_batch` and
  `diagonal_spatial`.
  """
  axis = utils.canonicalize_axis(axis, cov1)

  def _diag_mean(cov, mask):
    # Take the (sample, spatial) diagonal, then a masked mean over `axis`.
    diag = get_diagonal(cov, diagonal_batch, diagonal_spatial)
    mean, _ = mean_and_var(diag, axis=axis, keepdims=True, mask=mask)
    return mean

  d1 = _diag_mean(cov1, mask1)
  d2 = _diag_mean(cov2, mask2)

  end_axis = 1 if diagonal_spatial else d1.ndim
  start_axis = 1 if diagonal_batch else 0

  prod12 = utils.outer_prod(d1, d2, 0, end_axis, operation)
  prod11 = utils.outer_prod(d1, d1, start_axis, end_axis, operation)
  prod22 = (prod11 if d2 is None
            else utils.outer_prod(d2, d2, start_axis, end_axis, operation))
  return prod11, prod12, prod22
def mean_and_var(
    x: Optional[np.ndarray],
    axis: Optional[Axes] = None,
    dtype: Optional[np.dtype] = None,
    out: Optional[None] = None,
    ddof: int = 0,
    keepdims: bool = False,
    mask: Optional[np.ndarray] = None,
    get_var: bool = False
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
  """`np.mean` and `np.var` taking the `mask` information into account.

  Args:
    x: input array, or `None`, in which case `(None, None)` is returned.
    axis: axes to reduce over; `None` means all axes.
    dtype: forwarded to `np.mean` / `np.var`.
    out: unused; kept for `np.mean` signature compatibility.
    ddof: delta degrees of freedom for the variance.
    keepdims: whether reduced axes are retained as size-1 dimensions.
    mask: optional boolean array broadcastable to `x.shape`; nonzero entries
      are treated as masked and excluded from the statistics.
    get_var: whether to also compute the variance.

  Returns:
    A `(mean, var)` tuple; `var` is `None` unless `get_var=True`.
  """
  var = None
  if x is None:
    return x, var

  if mask is None:
    mean = np.mean(x, axis, dtype, out, keepdims)
    if get_var:
      var = np.var(x, axis, dtype, out, ddof, keepdims)
  else:
    axis = tuple(utils.canonicalize_axis(axis, x))
    size = utils.size_at(x, axis)
    mask = np.broadcast_to(mask, x.shape)
    mask_size = np.count_nonzero(mask, axis)
    for i in axis:
      mask_size = np.expand_dims(mask_size, i)
    # Number of unmasked entries per reduction slice, clipped at 1 to avoid
    # division by zero for fully-masked slices.
    size -= mask_size
    size = np.maximum(size, 1)

    mean = np.sum(x, axis=axis, keepdims=True) / size
    if get_var:
      # BUGFIX: compute the variance with the `keepdims=True` mean so that
      # `x - mean` broadcasts correctly for any reduction `axis`. Previously
      # `mean` was squeezed first when `keepdims=False`, which mis-broadcasts
      # (or raises) for non-trailing reduction axes; results are unchanged in
      # all cases that previously worked.
      var = np.sum((x - mean)**2, axis=axis, keepdims=True) / (size - ddof)
      if not keepdims:
        var = np.squeeze(var, axis)
    if not keepdims:
      mean = np.squeeze(mean, axis)
  return mean, var
| 39,172 | 33.913547 | 125 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/stax/linear.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear functions."""
import enum
import functools
import operator as op
import string
from typing import Callable, Iterable, Optional, Sequence, Tuple, Union
import warnings
import jax
from jax import lax
from jax import numpy as np
from jax import ops
from jax import random
from jax import ShapeDtypeStruct, eval_shape, vmap
from jax.core import ShapedArray
import jax.example_libraries.stax as ostax
import numpy as onp
from .requirements import Bool, Diagonal, get_diagonal_outer_prods, layer, mean_and_var, requires, supports_masking
from ..utils import utils
from ..utils.kernel import Kernel
from ..utils.typing import Axes, InternalLayer, InternalLayerMasked, PyTree
# Enums
class Padding(enum.Enum):
  """Padding schemes available to pooling and convolutional layers.

  Attributes:
    CIRCULAR:
      wrap-around (toroidal) padding, as if the input were a torus.
    SAME:
      zero padding ("same" padding).
    VALID:
      no padding ("valid" padding).
  """
  CIRCULAR = 'CIRCULAR'
  SAME = 'SAME'
  VALID = 'VALID'
class _Pooling(enum.Enum):
"""Type of pooling in pooling layers.
Attributes:
AVG:
average pooling, the output is normalized by the input receptive field
size.
SUM:
sum pooling, no normalization.
"""
AVG = 'AVG'
SUM = 'SUM'
class AggregateImplementation(enum.Enum):
  """Implementation strategy of the :obj:`Aggregate` layer.

  See :obj:`Aggregate` docstring for details.

  Attributes:
    DENSE:
      recommended for dense graphs, i.e. when the number of edges `E` grows
      like the number of vertices `V` to the power 1.5 or faster.
    SPARSE:
      recommended for sparse graphs, i.e. when `E ~ O(V)` or fewer.
  """
  DENSE = 'DENSE'
  SPARSE = 'SPARSE'
# LAYERS
@layer
@supports_masking(remask_kernel=False)
def Identity() -> InternalLayer:
  """Identity (no-op).

  Based on :obj:`jax.example_libraries.stax.Identity`.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.Identity

  def kernel_fn(k, **kwargs):
    # The kernel passes through unchanged.
    return k

  return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=False)
def DotGeneral(
    *,
    lhs: Optional[Union[np.ndarray, float]] = None,
    rhs: Optional[Union[np.ndarray, float]] = None,
    dimension_numbers: lax.DotDimensionNumbers = (((), ()), ((), ())),
    precision: Optional[lax.Precision] = None,
    batch_axis: int = 0,
    channel_axis: int = -1
) -> InternalLayerMasked:
  r"""Constant (non-trainable) rhs/lhs Dot General.
  Dot General allows to express any linear transformation on the inputs,
  including but not limited to matrix multiplication, pooling, convolutions,
  permutations, striding, masking etc (but specialized implementations are
  typically much more efficient).
  Returned `apply_fn` is calling
  `jax.lax.dot_general(inputs, rhs, dimension_numbers, precision)` or
  `jax.lax.dot_general(lhs, inputs, dimension_numbers, precision)`, depending
  on whether `lhs` or `rhs` is specified (not `None`).
  Example:
    >>> from jax import random
    >>> import jax.numpy as np
    >>> from neural_tangents import stax
    >>> #
    >>> # Two time series stacked along the second (H) dimension.
    >>> x = random.normal(random.PRNGKey(1), (5, 2, 32, 3))  # NHWC
    >>> #
    >>> # Multiply all outputs by a scalar:
    >>> nn = stax.serial(
    >>>     stax.Conv(128, (1, 3)),
    >>>     stax.Relu(),
    >>>     stax.DotGeneral(rhs=2.),  # output shape is (5, 2, 30, 128)
    >>>     stax.GlobalAvgPool()      # (5, 128)
    >>> )
    >>> #
    >>> # Subtract second time series from the first one:
    >>> nn = stax.serial(
    >>>     stax.Conv(128, (1, 3)),
    >>>     stax.Relu(),
    >>>     stax.DotGeneral(
    >>>         rhs=np.array([1., -1.]),
    >>>         dimension_numbers=(((1,), (0,)), ((), ()))),  # (5, 30, 128)
    >>>     stax.GlobalAvgPool()                              # (5, 128)
    >>> )
    >>> #
    >>> # Flip outputs with each other
    >>> nn = stax.serial(
    >>>     stax.Conv(128, (1, 3)),
    >>>     stax.Relu(),
    >>>     stax.DotGeneral(
    >>>         lhs=np.array([[0., 1.], [1., 0.]]),
    >>>         dimension_numbers=(((1,), (1,)), ((), ()))),  # (5, 2, 30, 128)
    >>>     stax.GlobalAvgPool()                              # (5, 128)
    >>> )
  See Also:
    https://www.tensorflow.org/xla/operation_semantics#dotgeneral
  Args:
    lhs:
      a constant array to dot with. `None` means layer `inputs` are the
      left-hand side.
    rhs:
      a constant array to dot with. `None` means layer `inputs` are the
      right-hand side. If both `lhs` and `rhs` are `None` the layer is the same
      as `Identity`.
    dimension_numbers:
      a tuple of tuples of the form `((lhs_contracting_dims,
      rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))`.
    precision:
      Optional. Either `None`, which means the default precision for the
      backend, or a `lax.Precision` enum value (`Precision.DEFAULT`,
      `Precision.HIGH` or `Precision.HIGHEST`).
    batch_axis:
      batch axis for `inputs`. Defaults to `0`, the leading axis. Can be present
      in `dimension_numbers`, but contraction along `batch_axis` will not allow
      for further layers to be applied afterwards.
    channel_axis:
      channel axis for `inputs`. Defaults to `-1`, the trailing axis. For
      `kernel_fn`, channel size is considered to be infinite. Cannot be present
      in `dimension_numbers`.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  if rhs is not None and lhs is not None:
    raise ValueError('At most one of constant `rhs` and `lhs` can be non-`None`'
                     ', since the other factor is considered to be the layer '
                     '`inputs`.')
  # `is_lhs` is `True` iff the layer inputs act as the left-hand operand of
  # the dot product (i.e. the constant is `lhs` or both are `None`).
  is_lhs = rhs is None
  other = np.array(lhs if is_lhs else rhs)
  def dot_fn(x):
    # Order the operands as (inputs, other) or (other, inputs) depending on
    # which side the constant was supplied for.
    args = (x, other.astype(x.dtype))[::(-1 if is_lhs else 1)]
    return lax.dot_general(*args, dimension_numbers, precision)
  def init_fn(rng, input_shape):
    # No trainable parameters; only the output shape is inferred (abstractly,
    # via `eval_shape` — no actual computation happens here).
    out = eval_shape(dot_fn, ShapeDtypeStruct(input_shape, other.dtype))
    return out.shape, ()
  def apply_fn(params, inputs, **kwargs):
    return dot_fn(inputs)
  # If a dimension is contracted, respective pairwise covariances are needed to
  # compute the covariance of contractions.
  input_cs = dimension_numbers[0][1 if is_lhs else 0]
  diagonal_batch = (batch_axis not in input_cs) or (rhs is None and lhs is None)
  diagonal_spatial = Diagonal(
      input=Bool.YES
      if (input_cs in ((), (batch_axis,)) or (rhs is None and lhs is None))
      else Bool.NO)
  @requires(diagonal_batch=diagonal_batch,
            diagonal_spatial=diagonal_spatial,
            batch_axis=batch_axis,
            channel_axis=channel_axis)
  def kernel_fn(k: Kernel, **kwargs) -> Kernel:
    # Infinite-width limit: the same contraction is applied on both sides of
    # each covariance matrix.
    return k.dot_general(other, other, is_lhs, dimension_numbers)
  def mask_fn(mask, input_shape):
    # Propagate the mask through the contraction: an output entry is masked
    # iff all contributing input entries were masked (hence the double `~`).
    mask_shape = list(input_shape)
    mask_shape[channel_axis] = mask.shape[channel_axis]
    return ~dot_fn(~np.broadcast_to(mask, mask_shape))
  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=True)
def Aggregate(
aggregate_axis: Optional[Axes] = None,
batch_axis: int = 0,
channel_axis: int = -1,
to_dense: Optional[Callable[[np.ndarray], np.ndarray]] = lambda p: p,
implementation: str = AggregateImplementation.DENSE.value
) -> InternalLayer:
r"""Aggregation operator (graphical neural network).
See e.g.
"`Graph Neural Tangent Kernel: Fusing Graph Neural Networks with Graph Kernels
<https://arxiv.org/abs/1905.13192>`_".
Specifically, each `N+2`-D `input` of shape `(batch, X_1, ..., X_N, channels)`
(subject to `batch_axis` and `channel_axis`) is accompanied by an array
`pattern` specifying the directed edges (arcs, arrows) of the graph. The
format of `pattern` depends on `implementation`:
`implementation = "DENSE"`:
Is recommended for dense graphs, where the number of
edges `E` is proportional to the number of vertices `V` to the power of 1.5
or more. In this case, `pattern` is a [weighted] adjacency 2-adjacency
`2K+1`-D tensor of shape `(batch, X_i1, ..., X_iK, X_i1, ..., X_iK)` (i.e.
leading batch dimensions, repeated spatial dimensions, no channel dimension)
and the output tensor is
`lax.dot_general(inputs, pattern, ((aggregate_axes, range(1, K + 1)),
(batch_axis,), (0,)))` with the `batch_axis` and `channel_axis` preserved.
`K = len(aggregate_axes)`.
Having `pattern[n, i1, ..., iK, j1, ..., jK] == w` represents a directed
edge (arc) from tail pixel / token `(i1, ..., iK)` to head `(j1, ..., jK)`
with weight `w` in an individual input sample `n`. The `apply_fn` of this
layer replaces all vertices with the (weighted) sum of all direct
predecessors to the given vertex.
Note that individual inputs can have more than `K` dimensions (e.g.
channels, other coordinates), in which case slices along these coordinates
are processed in the same way independently.
This implementation uses matrix multiplication, and for a graph with `V`
vertices and `E` edges, `apply_fn` costs `O(V^2)` memory and time, while
`kernel_fn` costs `O(V^2)` memory and `O(V^3)` time.
The adjacency tensor `pattern` can be specified in a sparse format. If
you provide a `to_dense` function (defaults to identity), then `pattern` is
decoded into a dense representation as described above
(`pattern_dense = to_dense(pattern)`) each time `apply_fn` or `kernel_fn`
are called. This avoids storing the whole graph in the dense format in
advance, but only convert it to dense format on the fly, for each
individual batch `x` / `(x1, x2)`. However, this does not improve the
runtime or memory of the `Aggregate` layer (in fact makes it a bit slower
due to an extra `to_dense` call).
`implementation = "SPARSE"`:
Is recommended for sparse graphs, where `E ~ O(V)` or less. In this case,
`pattern` must be an integer array of shape `(batch, n_edges, K, 2)`,
specifying `n_edges` directed edges (arcs) of weight `w = 1` for each of
the `batch` input samples (if `K == 1` `pattern` can also have the shape
`(batch, n_edges, 2)`). Trailing dimension of size 2 corresponds to tails
(sources, senders) and heads (targets, receivers). Edges can be repeated,
which is interpreted as having their weight be the number of repetitions.
If any of the `K` coordinates of a given vertex in `heads` is negative
(e.g. `-1`), it is discarded. This can be used for padding, when different
input samples have different `n_edges`. Note that this means you can't use
negative indexing to specify vertices.
This implementation uses :obj:`jax.ops.segment_sum` instead of matrix
multiplication. This makes `apply_fn` cost `O(V + E)` memory and `O(V + E)`
time, and `kernel_fn` cost `O(V^2)` memory and `O(V^2 + E^2 + V * E)` time.
This is beneficial for sparse graphs, i.e. `E << V^2`, but detrimental for
dense graphs (when `E ~ V^2`).
See Also:
`AggregateTest` in `tests/stax_test.py` for examples and conversion between
sparse and dense patterns.
Example:
>>> # 1D inputs
>>> x = random.normal(random.PRNGKey(1), (5, 3, 32)) # NCH
>>> #
>>> # 1) NHH dense binary adjacency matrix
>>> A = random.bernoulli(random.PRNGKey(2), 0.5, (5, 32, 32))
>>> # `A[n, h1, h2] == True`
>>> # means an edge between tokens `h1` and `h2` in sample `n`.
>>> #
>>> init_fn, apply_fn, kernel_fn = stax.Aggregate(aggregate_axis=2,
>>> batch_axis=0,
>>> channel_axis=1)
>>> #
>>> out = apply_fn((), x, pattern=A)
>>> # output is the same as `x @ A` of shape (5, 3, 32)
>>> #
>>> # Sparse NHH binary pattern with 10 edges
>>> n_edges = 10
>>> A_sparse = random.randint(random.PRNGKey(3),
>>> shape=(x.shape[0], n_edges, 1, 2),
>>> minval=0,
>>> maxval=x.shape[2])
>>> #
>>> # Setting `implementation="SPARSE"` to invoke the segment sum
>>> # implementation.
>>> init_fn, apply_fn, kernel_fn = stax.Aggregate(aggregate_axis=2,
>>> batch_axis=0,
>>> channel_axis=1,
>>> implementation="SPARSE")
>>> #
>>> out = apply_fn((), x, pattern=A_sparse)
>>> # output is of shape (5, 3, 32), computed via `jax.ops.segment_sum`.
>>> #
>>> # 2D inputs
>>> x = random.normal(random.PRNGKey(1), (5, 3, 32, 16)) # NCHW
>>> #
>>> # 2) NHWHW dense binary adjacency matrix
>>> A = random.bernoulli(random.PRNGKey(2), 0.5, (5, 32, 16, 32, 16))
>>> # `A[n, h1, w1, h2, w2] == True`
>>> # means an edge between pixels `(h1, w1)` and `(h2, w2)` in image `n`.
>>> #
>>> init_fn, apply_fn, kernel_fn = stax.Aggregate(aggregate_axis=(2, 3),
>>> batch_axis=0,
>>> channel_axis=1)
>>> #
>>> out = apply_fn((), x, pattern=A)
>>> # output is of shape (5, 3, 32, 16), the same as
>>> # `(x.reshape((5, 3, 32 * 16)) @ A.reshape((5, 32 * 16, 32 * 16))
>>> # ).reshape(x.shape)`
>>> #
>>> # 3) NWW binary adjacency matrix
>>> A = random.bernoulli(random.PRNGKey(2), 0.5, (5, 16, 16))
>>> # `A[n, w1, w2] == True`
>>> # means an edge between rows `w1` and `w2` in image `n`.
>>> #
>>> init_fn, apply_fn, kernel_fn = stax.Aggregate(aggregate_axis=(3,),
>>> batch_axis=0,
>>> channel_axis=1)
>>> #
>>> out = apply_fn((), x, pattern=A)
>>> # output is of shape (5, 3, 32, 16), the same as
>>> # `(x.reshape((5, 3 * 32, 16)) @ A).reshape(x.shape)`
>>> #
>>> # 4) Infinite width example
>>> x1 = random.normal(random.PRNGKey(1), (5, 3, 32)) # NCH
>>> x2 = random.normal(random.PRNGKey(2), (2, 3, 32)) # NCH
>>> #
>>> # NHH binary adjacency matrices
>>> A1 = random.bernoulli(random.PRNGKey(2), 0.5, (5, 32, 32))
>>> A2 = random.bernoulli(random.PRNGKey(2), 0.5, (2, 32, 32))
>>> #
>>> _, _, kernel_fn_id = stax.Identity()
>>> #
>>> _, _, kernel_fn_agg = stax.Aggregate(aggregate_axis=2,
>>> batch_axis=0,
>>> channel_axis=1)
>>> #
>>> nngp = kernel_fn_id(x1, x2, get='nngp', channel_axis=1)
>>> # initial NNGP of shape (5, 2, 32, 32)
>>> K_agg = kernel_fn_agg(x1, x2, get='nngp', pattern=(A1, A2))
>>> # output NNGP of same shape (5, 2, 32, 32):
>>> # `K_agg[n1, n2] == A1[n1].T @ nngp[n1, n2] @ A2[n2]`
Args:
aggregate_axis:
axes (non-batch and non-channel) to aggregate predecessor vertices over.
batch_axis:
batch axis for `inputs`. Defaults to `0`, the leading axis.
channel_axis:
channel axis for `inputs`. Defaults to `-1`, the trailing axis. For
`kernel_fn`, channel size is considered to be infinite.
to_dense:
Ignored unless `implementation == "DENSE"`. A function to convert
potentially sparse `pattern` matrices into dense `2K+1`-D tensors of shape
`(batch, X_i1, ..., X_iK, X_i1, ..., X_iK)`, with the batch leading
dimension, and no channel dimension, where `K = len(aggregate_axes)`.
Will be called on input `pattern` (or a pair `(pattern1, pattern2)`)
every time `apply_fn` or `kernel_fn` is called. Defaults to identity,
meaning that `pattern` is expected in the dense format.
implementation:
`"DENSE"` or `"SPARSE"`, specifying which implementation to use.
`"DENSE"` uses matrix multiplications and is recommended for dense graphs
(`E ~> O(V^1.5)`), while `"SPARSE"` uses :obj:`jax.ops.segment_sum` and is
recommended for sparse graphs (`E ~< O(V)`). Note that different
`implementation` require different `pattern` array format - see the
:obj:`Aggregate` layer docstring above for details.
Returns:
`(init_fn, apply_fn, kernel_fn)`.
"""
implementation = AggregateImplementation(implementation)
if implementation == AggregateImplementation.SPARSE:
warnings.warn('Negative indices in `pattern` are considered as padding '
'(i.e. ignored), unlike typical numpy negative indexing.')
init_fn = lambda rng, input_shape: (input_shape, ())
def get_agg_axes(ndim: int) -> Tuple[Tuple[int, ...], int, int]:
_batch_axis, _channel_axis = utils.mod((batch_axis, channel_axis), ndim)
if aggregate_axis is None:
agg_axes = tuple(i for i in range(ndim)
if i not in (_batch_axis, _channel_axis))
else:
agg_axes = tuple(utils.canonicalize_axis(aggregate_axis, ndim))
return agg_axes, _batch_axis, _channel_axis
def get_dimension_numbers(ndim: int) -> lax.DotDimensionNumbers:
agg_axes, batch_axis, _ = get_agg_axes(ndim)
agg_ndim = len(agg_axes)
return (agg_axes, (range(1, agg_ndim + 1))), ((batch_axis,), (0,))
@functools.partial(vmap, in_axes=(0, None))
def make_indices(index_array, agg_shape):
index_array = np.moveaxis(index_array, -1, 0)
raveled = np.ravel_multi_index(index_array, agg_shape, 'wrap')
# We mask edges where either sender or receiver is negative.
return np.where(np.all(index_array >= 0, axis=0), raveled, -1)
def get_senders_receivers(pattern, batch_size: int, agg_ndim: int):
"""Unpack `pattern` and make sure it has correct shape."""
if pattern.shape[-1] != 2:
raise ValueError('`pattern` must have a trailing dimension of 2, got '
f'{pattern.shape[-1]}.')
s, r = pattern[..., 0], pattern[..., 1]
# Allow for `(batch, n_edges, 2)` shape for single aggregation
# dimension `K == 1`.
if agg_ndim == 1 and s.ndim == 2:
s, r = np.expand_dims(s, -1), np.expand_dims(r, -1)
if s.ndim != 3:
raise ValueError(f'Tails and heads need to be 3-dimensional, '
f'got {s.ndim}.')
if s.shape[2] != agg_ndim:
raise ValueError(f'Trailing dimension of tails and heads need to have '
f'the same size as the number of aggregate axes of '
f'`aggregate_axis` ({agg_ndim}), got {s.shape[2]}.')
if s.shape[0] != batch_size:
raise ValueError(f'Tails and heads need to have leading dimension equal '
f'to batch size, got {s.shape[0]}.')
return s, r
def apply_fn(params,
inputs: np.ndarray,
*,
pattern: Optional[np.ndarray] = None,
**kwargs):
"""Compute the transformed tensors after an aggregation layer.
Args:
params:
Not used.
inputs:
An input `N+2`-D tensor of shape `(batch, X_1, ..., X_N, channels)`
(subject to `batch_axis` and `channel_axis`).
pattern:
A tensor specifying the directed edges between `inputs`. The shape and
type of `pattern` depends on `implementation` (see docstring of
`stax.Aggregate` above).
`implementation == "DENSE"`:
`pattern` must be a (float) `2K+1`-D tensor of shape
`(batch, X_i1, ..., X_iK, X_i1, ..., X_iK)`, with the batch leading
dimension, and no channel dimension, where `K = len(aggregate_axes)`.
Can have another shape (e.g. a sparse matrix), as long as
`to_dense(pattern)` has the correct (dense) shape (if `nt.batch` is
used, the leading dimension of `pattern` must be the batch dimension,
of size `batch`).
`implementation == "SPARSE"`:
`pattern` must be an integer array of shape `(batch, n_edges, K, 2)`,
specifying tail and head (source and target / sender and receiver)
vertices along the trailing dimension (if `K == 1`, `pattern` is also
allowed to have the shape `(batch, n_edges, 2)`).
`pattern=None` means identity adjacency, i.e. `apply_fn` is an identity
function.
**kwargs:
unused.
Returns:
An `N+2`-D tensor of shape of the same shape as `inputs`.
"""
if pattern is None:
return inputs
del params
ndim = inputs.ndim
agg_axes, batch_axis, channel_axis = get_agg_axes(ndim)
agg_ndim = len(agg_axes)
if implementation == AggregateImplementation.DENSE:
# Dense implementation through matrix multiplication.
pattern = to_dense(pattern)
dn = get_dimension_numbers(ndim)
out = lax.dot_general(inputs, pattern.astype(inputs.dtype), dn)
# Put back potentially displaced batch and channel axes.
out_c_axis = utils.axis_after_dot(channel_axis % ndim, dn[0][0], dn[1][0])
out_b_axis = utils.axis_after_dot(batch_axis % ndim, dn[0][0], dn[1][0])
out = np.moveaxis(out,
(out_b_axis, out_c_axis) + tuple(range(-agg_ndim, 0)),
(batch_axis, channel_axis) + agg_axes)
elif implementation == AggregateImplementation.SPARSE:
# Sparse implementation through `jax.ops.segment_sum`.
s, r = get_senders_receivers(pattern, inputs.shape[batch_axis], agg_ndim)
# Canonicalize axes
src_axes = (batch_axis,) + agg_axes + (channel_axis,)
dst_axes = (0,) + tuple(range(1, agg_ndim + 1)) + (-1,)
inputs = np.moveaxis(inputs, src_axes, dst_axes)
input_shape = inputs.shape
inputs = inputs.reshape((inputs.shape[0],
functools.reduce(
op.mul, inputs.shape[1:agg_ndim + 1], 1))
+ inputs.shape[agg_ndim + 1:])
agg_shape = input_shape[1:agg_ndim + 1]
s, r = make_indices(s, agg_shape), make_indices(r, agg_shape)
@vmap
def pass_messages(s, r, inputs):
n_nodes = inputs.shape[0]
sender_in = inputs[s]
messages = ops.segment_sum(sender_in, r, num_segments=n_nodes)
return messages
out = pass_messages(s, r, inputs)
out = out.reshape(input_shape)
out = np.moveaxis(out, dst_axes, src_axes)
else:
raise ValueError(f'Unrecognized `implementation == {implementation}.')
return out
@requires(batch_axis=batch_axis,
channel_axis=channel_axis,
diagonal_spatial=Diagonal(input=Bool.NO, output=Bool.NO))
def kernel_fn(k: Kernel,
*,
pattern: Tuple[Optional[np.ndarray],
Optional[np.ndarray]] = (None, None),
**kwargs):
"""Compute the transformed kernels after an aggregation kernel layer.
Specifically, the `nngp`/`ntk` is a `2N+2`-D tensor of shape
`(B_1, B_2, X_1, X_1, ..., X_N, X_N)`.
If `implementation == "DENSE"`, this tensor will be aggregated
(via matrix multiplication) on the left by `to_dense(pattern[0])` of
shape `(B_1, X_i1, ..., X_iK)` and on the right by `to_dense(pattern[1])`
of shape `(B_2, X_i1, ..., X_iK)`. Ignoring the batch dimensions, the
output `nngp/ntk` is `pattern[0].T @ nngp/ntk @ pattern[1]`.
If `implementation == "SPARSE"`, result is computed using
`jax.ops.segment_sum` given `pattern[0]` and `pattern[1]` as integer
arrays of shapes `(B_1, n_edges_1, K, 2)` and `(B_2, n_edges_2, K, 2)`
respectively.
"""
pattern1, pattern2 = pattern
if pattern1 is None and pattern2 is None:
return k
if pattern1 is None or pattern2 is None:
raise NotImplementedError(
'Having exactly one of two `pattern1/2=None` is not implemented. '
'Please file a bug at '
'https://github.com/google/neural-tangents/issues/new.')
ndim = len(k.shape1)
agg_axes, batch_axis, channel_axis = get_agg_axes(ndim)
agg_ndim = len(agg_axes)
agg_shape = tuple(k.shape1[a] for a in agg_axes)
agg_size = functools.reduce(op.mul, agg_shape, 1)
def bucket_axes(ndim, start_axis):
"""Bucket kernel axes into batch, aggregate, and non-aggregate."""
ndim_spatial = (ndim - start_axis) // 2
agg_1 = tuple(
a - int(batch_axis < a) - int(channel_axis < a) + start_axis
for a in agg_axes)
agg_2 = tuple(
a + ndim_spatial
for a in agg_1)
non_agg_1 = tuple(
a for a in range(start_axis, start_axis + ndim_spatial)
if a not in agg_1)
non_agg_2 = tuple(
a for a in range(start_axis + ndim_spatial, ndim)
if a not in agg_2)
return tuple(range(start_axis)), agg_1, agg_2, non_agg_1, non_agg_2
if implementation == AggregateImplementation.DENSE:
# Dense implementation through matrix multiplication.
pattern1 = None if pattern1 is None else to_dense(pattern1)
pattern2 = None if pattern2 is None else to_dense(pattern2)
k = k.dot_general(
other1=pattern1,
other2=pattern2,
is_lhs=False,
dimension_numbers=get_dimension_numbers(ndim)
)
# Put back potentially displaced axes.
def transpose(k, diagonal_batch):
if k is None or k.ndim == 0:
return k
start_axis = 1 if diagonal_batch else 2
k = utils.unzip_axes(k, start_axis)
b, agg_1, agg_2, non_agg_1, non_agg_2 = bucket_axes(k.ndim, start_axis)
permutation = b + non_agg_1 + agg_1 + non_agg_2 + agg_2
k = np.transpose(k, onp.argsort(permutation))
return utils.zip_axes(k, start_axis)
k = k.replace(
cov1=transpose(k.cov1, k.diagonal_batch),
cov2=transpose(k.cov2, k.diagonal_batch),
nngp=transpose(k.nngp, False),
ntk=transpose(k.ntk, False),
batch_axis=batch_axis % ndim,
channel_axis=channel_axis % ndim
)
elif implementation == AggregateImplementation.SPARSE:
# Sparse implementation through `jax.ops.segment_sum`.
def pass_messages(s1, s2, r1, r2, k):
v1, v2 = k.shape[:2]
def send(s, r, num_segments):
return ops.segment_sum(s, r, num_segments=num_segments)
send_inner = vmap(functools.partial(send, num_segments=v2), (0, None))
k = k[s1[:, None], s2[None, :]]
k = send_inner(k, r2)
k = send(k, r1, num_segments=v1)
return k
pass_messages_self = vmap(pass_messages)
pass_messages_cross = vmap(vmap(pass_messages,
(None, 0, None, 0, 0)),
(0, None, 0, None, 0))
s1, r1 = get_senders_receivers(pattern1, k.shape1[batch_axis], agg_ndim)
s2, r2 = get_senders_receivers(pattern2, k.shape2[batch_axis], agg_ndim)
s1, r1 = make_indices(s1, agg_shape), make_indices(r1, agg_shape)
s2, r2 = make_indices(s2, agg_shape), make_indices(r2, agg_shape)
def agg(k, diagonal_batch, s1, r1, s2, r2):
if k is None or k.ndim == 0:
return k
start_axis = 1 if diagonal_batch else 2
k = utils.unzip_axes(k, start_axis)
b, agg_1, agg_2, non_agg_1, non_agg_2 = bucket_axes(k.ndim, start_axis)
permutation = b + agg_1 + agg_2 + non_agg_1 + non_agg_2
k = np.transpose(k, permutation)
k_shape = k.shape
k = k.reshape(
k.shape[:start_axis] +
(agg_size,) * 2 +
k.shape[start_axis + 2 * len(agg_axes):]
)
fn = pass_messages_self if diagonal_batch else pass_messages_cross
k = fn(s1, s2, r1, r2, k)
k = k.reshape(k_shape)
k = np.transpose(k, onp.argsort(permutation))
return utils.zip_axes(k, start_axis)
nngp = agg(k.nngp, False, s1, r1, s2, r2)
ntk = agg(k.ntk, False, s1, r1, s2, r2)
cov1 = agg(k.cov1, k.diagonal_batch, s1, r1, s1, r1)
cov2 = agg(k.cov2, k.diagonal_batch, s2, r2, s2, r2)
k = k.replace(nngp=nngp, ntk=ntk, cov1=cov1, cov2=cov2)
else:
raise ValueError(f'Unregocnized `implementation == {implementation}.')
return k
return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=True)
def Dense(
    out_dim: int,
    W_std: float = 1.,
    b_std: Optional[float] = None,
    batch_axis: int = 0,
    channel_axis: int = -1,
    parameterization: str = 'ntk',
    s: Tuple[int, int] = (1, 1),
) -> InternalLayerMasked:
  r"""Dense (fully-connected, matrix product).

  Based on :obj:`jax.example_libraries.stax.Dense`.

  Args:
    out_dim:
      The output feature / channel dimension. This is ignored by the
      `kernel_fn` in `"ntk"` parameterization.
    W_std:
      Specifies the standard deviation of the weights.
    b_std:
      Specifies the standard deviation of the biases. `None` means no bias.
    batch_axis:
      Specifies which axis contains different elements of the batch.
      Defaults to `0`, the leading axis.
    channel_axis: Specifies which axis contains the features / channels.
      Defaults to `-1`, the trailing axis. For `kernel_fn`, channel size is
      considered to be infinite.
    parameterization:
      Either `"ntk"` or `"standard"`.

      Under `"ntk"` parameterization (page 3 in "`Neural Tangent Kernel:
      Convergence and Generalization in Neural Networks
      <https://arxiv.org/abs/1806.07572>`_"),
      weights and biases are initialized as
      :math:`W_{ij} \sim \mathcal{N}(0,1)`, :math:`b_i \sim \mathcal{N}(0,1)`,
      and the finite width layer equation is
      :math:`z_i = \sigma_W / \sqrt{N} \sum_j W_{ij} x_j + \sigma_b b_i`, where
      `N` is `out_dim`.

      Under `"standard"` parameterization ("`On the infinite width limit of
      neural networks with a standard parameterization
      <https://arxiv.org/abs/2001.07301>`_".),
      weights and biases are initialized as :math:`W_{ij} \sim \mathcal{N}(0,
      W_{std}^2/N)`,
      :math:`b_i \sim \mathcal{N}(0,\sigma_b^2)`, and the finite width layer
      equation is
      :math:`z_i = \frac{1}{s} \sum_j W_{ij} x_j + b_i`, where `N` is `out_dim`.

      `N` corresponds to the respective variable in
      "`On the infinite width limit of neural networks with a standard
      parameterization <https://arxiv.org/abs/2001.07301>`_".
    s:
      only applicable when `parameterization="standard"`. A tuple of integers
      specifying the width scalings of the input and the output of the layer,
      i.e. the weight matrix `W` of the layer has shape
      `(s[0] * in_dim, s[1] * out_dim)`, and the bias has size `s[1] * out_dim`.

      .. note::
        We need `s[0]` (scaling of the previous layer) to infer `in_dim` from
        `input_shape`. Further, for the bottom layer, `s[0]` must be `1`, and
        for all other layers `s[0]` must be equal to `s[1]` of the previous
        layer. For the top layer, `s[1]` is expected to be `1` (recall that the
        output size is `s[1] * out_dim`, and in common infinite network
        research input and output sizes are considered fixed).

      `s` corresponds to the respective variable in
      "`On the infinite width limit of neural networks with a standard
      parameterization <https://arxiv.org/abs/2001.07301>`_".

      For `parameterization="ntk"`, or for standard, finite-width networks
      corresponding to He initialization, `s=(1, 1)`.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # TODO(jaschasd): after experimentation, evaluate whether to change default
  # parameterization from "ntk" to "standard"
  parameterization = parameterization.lower()

  def _init_fn(rng, input_shape, out_dim):
    # Shared initializer: unit-variance Gaussian weights of shape
    # (in_dim, out_dim) and, if `b_std` is set, unit-variance Gaussian biases
    # shaped to broadcast along every non-channel axis.
    _channel_axis = channel_axis % len(input_shape)
    output_shape = (input_shape[:_channel_axis] + (out_dim,)
                    + input_shape[_channel_axis + 1:])
    rng1, rng2 = random.split(rng)
    W = random.normal(rng1, (input_shape[_channel_axis], out_dim))
    if b_std is None:
      b = None
    else:
      b_shape = [1] * len(input_shape)
      b_shape[channel_axis] = out_dim
      b = random.normal(rng2, b_shape)
    return output_shape, (W, b)

  def ntk_init_fn(rng, input_shape):
    # NTK parameterization: parameters stay unit-variance; all scaling is
    # applied inside `apply_fn`.
    return _init_fn(rng, input_shape, out_dim)

  def standard_init_fn(rng, input_shape):
    # Standard parameterization: scaling is folded into the parameters at
    # initialization, and the output width is multiplied by `s[1]`.
    output_shape, (W, b) = _init_fn(rng, input_shape, out_dim * s[1])
    W *= W_std / (input_shape[channel_axis] / s[0])**0.5
    b = None if b is None else b * b_std
    return output_shape, (W, b)

  if parameterization == 'ntk':
    init_fn = ntk_init_fn
  elif parameterization == 'standard':
    init_fn = standard_init_fn
  else:
    raise ValueError(f'Parameterization not supported: {parameterization}')

  def apply_fn(params, inputs, **kwargs):
    # Finite-width forward pass: contract the channel axis of `inputs` against
    # the first axis of `W`, then move the new output-channel axis back to
    # `channel_axis`.
    W, b = params
    prod = np.moveaxis(np.tensordot(W, inputs, (0, channel_axis)),
                       0, channel_axis)
    if parameterization == 'ntk':
      norm = W_std / inputs.shape[channel_axis]**0.5
      outputs = norm * prod
      if b is not None:
        outputs += b_std * b
    elif parameterization == 'standard':
      outputs = prod / s[0]**0.5
      if b is not None:
        outputs += b
    else:
      raise ValueError(f'Parameterization not supported: {parameterization}')
    return outputs

  @requires(batch_axis=batch_axis,
            channel_axis=channel_axis,
            diagonal_spatial=Diagonal())
  def kernel_fn(k: Kernel, **kwargs):
    """Compute the transformed kernels after a `Dense` layer."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk

    def fc(x):
      # Affine second-moment map: x -> W_std**2 * x (+ b_std**2 if biased).
      return _affine(x, W_std, b_std)

    if parameterization == 'ntk':
      cov1, nngp, cov2 = map(fc, (cov1, nngp, cov2))
      if ntk is not None:
        # Uses the *post-layer* `nngp` computed on the line above.
        ntk = nngp + W_std**2 * ntk
    elif parameterization == 'standard':
      input_width = k.shape1[channel_axis] / s[0]
      if ntk is not None:
        # NOTE: order matters — `ntk` must be computed from the *pre-layer*
        # `nngp`, before `fc` rescales it below.
        ntk = input_width * nngp + W_std**2 * ntk
        if b_std is not None:
          ntk += 1.
      cov1, nngp, cov2 = map(fc, (cov1, nngp, cov2))

    return k.replace(cov1=cov1,
                     nngp=nngp,
                     cov2=cov2,
                     ntk=ntk,
                     is_gaussian=True,
                     is_input=False)

  def mask_fn(mask, input_shape):
    # A dense layer mixes all channels, so an output position is masked only
    # when every one of its input channels is masked.
    return np.all(mask, axis=channel_axis, keepdims=True)

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=True)
def Conv(
    out_chan: int,
    filter_shape: Sequence[int],
    strides: Optional[Sequence[int]] = None,
    padding: str = Padding.VALID.name,
    W_std: float = 1.0,
    b_std: Optional[float] = None,
    dimension_numbers: Optional[Tuple[str, str, str]] = None,
    parameterization: str = 'ntk',
    s: Tuple[int, int] = (1, 1),
) -> InternalLayerMasked:
  """General shared-weight convolution.

  Thin wrapper dispatching to `_Conv` with `transpose=False` and
  `shared_weights=True`. Based on :obj:`jax.example_libraries.stax.GeneralConv`.

  Args:
    out_chan:
      The number of output channels / features. Ignored by the `kernel_fn`
      in NTK parameterization.
    filter_shape:
      The shape of the filter; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    strides:
      The stride of the convolution; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    padding:
      One of `"VALID"`, `"SAME"`, or `"CIRCULAR"` (periodic convolutions).
    W_std:
      The standard deviation of the weights.
    b_std:
      The standard deviation of the biases.
    dimension_numbers:
      Which axes to convolve over, matching the specification in
      :obj:`jax.lax.conv_general_dilated`.
    parameterization:
      Either `"ntk"` or `"standard"`; the direct convolutional analogues of
      the corresponding :obj:`Dense` parameterizations.
    s:
      A tuple of integers; the direct convolutional analogue of the
      respective :obj:`Dense` parameter.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _Conv(
      out_chan=out_chan,
      filter_shape=filter_shape,
      strides=strides,
      padding=padding,
      W_std=W_std,
      b_std=b_std,
      dimension_numbers=dimension_numbers,
      parameterization=parameterization,
      s=s,
      transpose=False,
      shared_weights=True)
@layer
@supports_masking(remask_kernel=True)
def ConvTranspose(
    out_chan: int,
    filter_shape: Sequence[int],
    strides: Optional[Sequence[int]] = None,
    padding: str = Padding.VALID.name,
    W_std: float = 1.0,
    b_std: Optional[float] = None,
    dimension_numbers: Optional[Tuple[str, str, str]] = None,
    parameterization: str = 'ntk',
    s: Tuple[int, int] = (1, 1),
) -> InternalLayerMasked:
  """General transpose convolution.

  Thin wrapper dispatching to `_Conv` with `transpose=True` and
  `shared_weights=True`. Based on
  :obj:`jax.example_libraries.stax.GeneralConvTranspose`.

  Args:
    out_chan:
      The number of output channels / features. Ignored by the `kernel_fn`
      in `"ntk"` parameterization.
    filter_shape:
      The shape of the filter; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    strides:
      The stride of the convolution; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    padding:
      One of `"VALID"`, `"SAME"`, or `"CIRCULAR"` (periodic convolutions).
    W_std:
      Standard deviation of the weights.
    b_std:
      Standard deviation of the biases.
    dimension_numbers:
      Which axes to convolve over, matching the specification in
      :obj:`jax.lax.conv_general_dilated`.
    parameterization:
      Either `"ntk"` or `"standard"`; the direct convolutional analogues of
      the corresponding :obj:`Dense` parameterizations.
    s:
      A tuple of integers; the direct convolutional analogue of the
      respective :obj:`Dense` parameter.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _Conv(
      out_chan=out_chan,
      filter_shape=filter_shape,
      strides=strides,
      padding=padding,
      W_std=W_std,
      b_std=b_std,
      dimension_numbers=dimension_numbers,
      parameterization=parameterization,
      s=s,
      transpose=True,
      shared_weights=True)
@layer
@supports_masking(remask_kernel=True)
def ConvLocal(
    out_chan: int,
    filter_shape: Sequence[int],
    strides: Optional[Sequence[int]] = None,
    padding: str = Padding.VALID.name,
    W_std: float = 1.0,
    b_std: Optional[float] = None,
    dimension_numbers: Optional[Tuple[str, str, str]] = None,
    parameterization: str = 'ntk',
    s: Tuple[int, int] = (1, 1),
) -> InternalLayerMasked:
  """General unshared convolution.

  Also known as "Locally connected networks" (LCNs): identical to
  convolutions except that separate (unshared) filters are used at different
  spatial locations. Thin wrapper dispatching to `_Conv` with
  `transpose=False` and `shared_weights=False`.

  Args:
    out_chan:
      The number of output channels / features. Ignored by the `kernel_fn`
      in `"ntk"` parameterization.
    filter_shape:
      The shape of the filter; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    strides:
      The stride of the convolution; must agree with the number of spatial
      dimensions in `dimension_numbers`.
    padding:
      One of `"VALID"`, `"SAME"`, or `"CIRCULAR"` (periodic convolutions).
    W_std:
      Standard deviation of the weights.
    b_std:
      Standard deviation of the biases. `None` means no bias.
    dimension_numbers:
      Which axes to convolve over, matching the specification in
      :obj:`jax.lax.conv_general_dilated`.
    parameterization:
      Either `"ntk"` or `"standard"`; the direct convolutional analogues of
      the corresponding :obj:`Dense` parameterizations.
    s:
      A tuple of integers; the direct convolutional analogue of the
      respective :obj:`Dense` parameter.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _Conv(
      out_chan=out_chan,
      filter_shape=filter_shape,
      strides=strides,
      padding=padding,
      W_std=W_std,
      b_std=b_std,
      dimension_numbers=dimension_numbers,
      parameterization=parameterization,
      s=s,
      transpose=False,
      shared_weights=False)
def _Conv(
    out_chan: int,
    filter_shape: Sequence[int],
    strides: Optional[Sequence[int]],
    padding: str,
    W_std: float,
    b_std: Optional[float],
    dimension_numbers: Optional[Tuple[str, str, str]],
    parameterization: str,
    s: Tuple[int, int],
    transpose: bool,
    shared_weights: bool
) -> InternalLayerMasked:
  """General convolution.

  Based on :obj:`jax.example_libraries.stax.GeneralConv`.

  Args:
    out_chan:
      The number of output channels / features of the convolution. This is
      ignored by the `kernel_fn` in NTK parameterization.
    filter_shape: The shape of the filter.
      The shape of the tuple should agree with the number of spatial dimensions
      in `dimension_numbers`.
    strides:
      The stride of the convolution. The shape of the tuple should agree with
      the number of spatial dimensions in `dimension_numbers`.
    padding:
      Specifies padding for the convolution. Can be one of `"VALID"`, `"SAME"`,
      or `"CIRCULAR"`. `"CIRCULAR"` uses periodic convolutions.
    W_std:
      The standard deviation of the weights.
    b_std:
      The standard deviation of the biases. `None` means no bias.
    dimension_numbers:
      Specifies which axes should be convolved over. Should match the
      specification in :obj:`jax.lax.conv_general_dilated`.
    parameterization:
      Either `"ntk"` or `"standard"`. These parameterizations are the direct
      analogues for convolution of the corresponding parameterizations for
      `Dense` layers.
    s:
      A tuple of integers, a direct convolutional analogue of the respective
      parameters for the :obj:`Dense` layer.
    transpose:
      `True` to use transpose convolution.
    shared_weights:
      `True` to share weights (regular CNNs); otherwise different weights at
      different spatial locations (locally connected networks, LCNs).

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  parameterization = parameterization.lower()

  if dimension_numbers is None:
    dimension_numbers = _get_dimension_numbers(len(filter_shape), False)

  lhs_spec, rhs_spec, out_spec = dimension_numbers

  one = (1,) * len(filter_shape)
  strides = strides or one

  padding = Padding(padding)
  if padding == Padding.CIRCULAR:
    # Circular padding is implemented by padding explicitly and then running
    # a VALID convolution; shape inference still uses SAME.
    apply_padding = Padding.VALID
    # NOTE(review): `padding.SAME` reaches the enum member through an
    # instance — equivalent to `Padding.SAME`.
    init_padding = padding.SAME
  else:
    init_padding = apply_padding = padding

  if parameterization == 'ntk':
    out_chan_arg = out_chan
  elif parameterization == 'standard':
    # Standard parameterization widens the output by the scaling factor s[1].
    out_chan_arg = out_chan * s[1]
  else:
    raise ValueError(parameterization)
  init_args = dict(dimension_numbers=dimension_numbers,
                   out_chan=out_chan_arg,
                   filter_shape=filter_shape,
                   strides=strides,
                   padding=init_padding.name,
                   W_init=random.normal,
                   b_init=random.normal)

  def get_ntk_init_fn(ostax_init_fn):
    # Wrap a stax initializer so the bias is dropped when `b_std is None`.
    def ntk_init_fn(rng, input_shape):
      output_shape, (W, b) = ostax_init_fn(rng, input_shape)
      if b_std is None:
        b = None
      return output_shape, (W, b)
    return ntk_init_fn

  if transpose:
    if not shared_weights:
      raise NotImplementedError('Unshared transpose CNN not implemented.')

    lax_conv = lax.conv_transpose
    ostax_init_fn, _ = ostax.GeneralConvTranspose(**init_args)
    ntk_init_fn = get_ntk_init_fn(ostax_init_fn)

  else:
    if shared_weights:
      lax_conv = lax.conv_general_dilated
      ostax_init_fn, _ = ostax.GeneralConv(**init_args)
      ntk_init_fn = get_ntk_init_fn(ostax_init_fn)

    else:
      # Locally connected network: per-location filters via
      # `conv_general_dilated_local`.
      lax_conv = functools.partial(lax.conv_general_dilated_local,
                                   filter_shape=filter_shape)

      def ntk_init_fn(rng, input_shape):
        """Adapted from :obj:`jax.example_libraries.stax.GeneralConv`."""
        # Shape of an ordinary (shared) conv kernel, used only to infer the
        # output shape via abstract evaluation.
        filter_shape_iter = iter(filter_shape)
        conv_kernel_shape = [out_chan if c == 'O' else
                             input_shape[lhs_spec.index('C')] if c == 'I' else
                             next(filter_shape_iter) for c in rhs_spec]
        output_shape = eval_shape(
            lambda lhs, rhs: lax.conv_general_dilated(
                lhs=lhs,
                rhs=rhs,
                window_strides=strides,
                padding=init_padding.name,
                dimension_numbers=dimension_numbers
            ),
            ShapedArray(input_shape, np.float32),
            ShapedArray(conv_kernel_shape, np.float32)
        ).shape
        # Unshared kernel: flattened receptive field on the 'I' axis, one
        # filter per output spatial location.
        kernel_shape = [out_chan if c == 'O' else
                        onp.prod(conv_kernel_shape) // out_chan if c == 'I' else
                        output_shape[out_spec.index(c)] for c in rhs_spec]
        bias_shape = [output_shape[i] if c != 'N' else 1
                      for i, c in enumerate(out_spec)]
        k1, k2 = random.split(rng)
        W = random.normal(k1, kernel_shape)
        b = None if b_std is None else random.normal(k2, bias_shape)
        return output_shape, (W, b)

  def get_fan_in(input_shape):
    # Fan-in of one output unit: input channels times receptive-field size.
    return input_shape[lhs_spec.index('C')] * onp.prod(filter_shape)

  def standard_init_fn(rng, input_shape):
    # Fold the NTK scaling into the parameters at initialization.
    output_shape, (W, b) = ntk_init_fn(rng, input_shape)
    norm = W_std / (get_fan_in(input_shape) / s[0])**0.5
    return output_shape, (W * norm, None if b_std is None else b * b_std)

  if parameterization == 'ntk':
    init_fn = ntk_init_fn
  elif parameterization == 'standard':
    init_fn = standard_init_fn
  else:
    raise ValueError(f'Parameterization not supported: {parameterization}.')

  def apply_fn(params, inputs, **kwargs):
    # Finite-width forward pass.
    W, b = params

    if parameterization == 'ntk':
      norm = W_std / get_fan_in(inputs.shape)**0.5
      b_rescale = b_std
    elif parameterization == 'standard':
      norm = 1. / s[0]**0.5
      b_rescale = 1.
    else:
      raise NotImplementedError(parameterization)

    if padding == Padding.CIRCULAR and not transpose:
      # Pad the input periodically, then run the VALID convolution below.
      spatial_axes = tuple(lhs_spec.index(c)
                           for c in rhs_spec if c not in ('I', 'O'))
      inputs = _same_pad_for_filter_shape(inputs, filter_shape, strides,
                                          spatial_axes)

    res = norm * lax_conv(
        inputs,
        W,
        strides,
        apply_padding.name,
        dimension_numbers=dimension_numbers)

    if padding == Padding.CIRCULAR and transpose:
      # For transpose convolutions the periodic wrap-around is applied to the
      # output instead; infer the SAME-padded output shape abstractly.
      out_shape = eval_shape(lambda x: lax.conv_transpose(
          lhs=x,
          rhs=W,
          strides=strides,
          padding=Padding.SAME.name,
          dimension_numbers=dimension_numbers
      ), inputs).shape
      spatial_axes = tuple(out_spec.index(c)
                           for c in rhs_spec if c not in ('I', 'O'))
      res = _same_pad_for_filter_shape_transpose(res, spatial_axes, out_shape)

    if b is not None:
      res += b_rescale * b
    return res

  @requires(batch_axis=lhs_spec.index('N'),
            channel_axis=lhs_spec.index('C'),
            diagonal_spatial=Diagonal(
                output=Bool.NO if shared_weights else Bool.MAYBE))
  def kernel_fn(k: Kernel, **kwargs):
    """Compute the transformed kernels after a conv layer."""
    cov1, nngp, cov2, ntk, is_reversed = (k.cov1, k.nngp, k.cov2, k.ntk,
                                          k.is_reversed)

    # Permute filter shape / strides from the rhs (filter) spec ordering into
    # the lhs (input) spatial ordering used by the kernel tensors.
    input_spec = tuple(c for c in lhs_spec if c not in ('N', 'C'))
    conv_spec = tuple(c for c in rhs_spec if c not in ('I', 'O'))
    input_to_filter_permutation = tuple(conv_spec.index(c) for c in input_spec)

    filter_shape_kernel = tuple(filter_shape[p] for p in
                                input_to_filter_permutation)
    strides_kernel = tuple(strides[p] for p in
                           input_to_filter_permutation)

    if k.diagonal_spatial:
      conv_kernel = (_conv_kernel_diagonal_spatial_transpose
                     if transpose else _conv_kernel_diagonal_spatial)

    else:
      if shared_weights:
        # Shared-weight kernels alternate spatial axis orientation with every
        # layer; flip back if the kernel is currently reversed.
        if is_reversed:
          filter_shape_kernel = filter_shape_kernel[::-1]
          strides_kernel = strides_kernel[::-1]

        is_reversed = not is_reversed

      if transpose:
        conv_kernel = _conv_kernel_full_spatial_transpose
      else:
        if shared_weights:
          conv_kernel = _conv_kernel_full_spatial_shared
        else:
          conv_kernel = _conv_kernel_full_spatial_unshared

    def conv_unscaled(lhs, batch_ndim):
      # Apply the convolutional kernel transformation without W_std / b_std
      # scaling.
      lhs = conv_kernel(lhs,
                        filter_shape_kernel,
                        strides_kernel,
                        padding,
                        batch_ndim)
      return lhs

    def affine(out, scale, shift, batch_ndim):
      # out -> scale * out + shift. For unshared weights with full spatial
      # kernels the bias term only affects the diagonal spatial entries.
      if out is not None:
        out *= scale
        if shift is not None:
          if k.diagonal_spatial or shared_weights:
            out += shift
          else:
            idx = (Ellipsis,)
            for i in range(batch_ndim, out.ndim, 2):
              shape = [1] * out.ndim
              size = out.shape[i]
              shape[i] = size
              idx += (np.arange(size).reshape(shape),) * 2
            out = out.at[idx].add(shift)
      return out

    b_std_sq = None if b_std is None else b_std**2

    def conv(lhs, batch_ndim):
      out = conv_unscaled(lhs, batch_ndim)
      out = affine(out, W_std**2, b_std_sq, batch_ndim)
      return out

    cov1 = conv(cov1, 1 if k.diagonal_batch else 2)
    cov2 = conv(cov2, 1 if k.diagonal_batch else 2)

    if parameterization == 'ntk':
      nngp = conv(nngp, 2)
      if ntk is not None:
        ntk = W_std**2 * conv_unscaled(ntk, 2) + nngp
    elif parameterization == 'standard':
      # NOTE: order matters — `ntk` must use the unscaled (pre-affine) nngp.
      nngp_unscaled = conv_unscaled(nngp, 2)
      if ntk is not None:
        ntk = (get_fan_in(k.shape1) / s[0] * nngp_unscaled +
               W_std ** 2 * conv_unscaled(ntk, 2))
        if b_std is not None:
          ntk = affine(ntk, 1, 1., 2)
      nngp = affine(nngp_unscaled, W_std**2, b_std_sq, 2)

    res = k.replace(cov1=cov1,
                    nngp=nngp,
                    cov2=cov2,
                    ntk=ntk,
                    is_gaussian=True,
                    is_reversed=is_reversed,
                    batch_axis=out_spec.index('N'),
                    channel_axis=out_spec.index('C'),
                    is_input=False)

    # Reorder output spatial dimensions if the finite layer does so.
    # TODO(romann): make more efficient / lazy.
    out_spec_kernel = tuple(c for c in out_spec if c not in ('N', 'C'))
    in_to_out_permutation = tuple(out_spec_kernel.index(c) for c in input_spec)
    res = res.transpose(in_to_out_permutation)
    return res

  def mask_fn(mask, input_shape):
    batch_axis, channel_axis = lhs_spec.index('N'), lhs_spec.index('C')

    # Collapse channel dimension of masks, since an FC layer is applied at each
    # spatial location.
    mask = np.all(mask, axis=channel_axis, keepdims=True)

    if transpose:
      # Propagate the mask through an all-ones transpose convolution.
      rhs_shape = list(filter_shape)
      for c in ('O', 'I'):
        rhs_shape.insert(rhs_spec.index(c), 1)

      # TODO(romann): revisit based on http://b/235531081.
      rhs = np.ones(
          rhs_shape,
          dtype=None if jax.default_backend() == 'gpu' else mask.dtype)
      mask = lax.conv_transpose(
          mask.astype(rhs.dtype),
          rhs,
          strides,
          init_padding.name,
          dimension_numbers=dimension_numbers).astype(mask.dtype)

    else:
      mask = _pool_mask(mask, filter_shape, strides, init_padding,
                        batch_axis, channel_axis)
      mask = np.transpose(mask, (out_spec.index(c) for c in lhs_spec))

    return mask

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=True)
def AvgPool(window_shape: Sequence[int],
            strides: Optional[Sequence[int]] = None,
            padding: str = Padding.VALID.name,
            normalize_edges: bool = False,
            batch_axis: int = 0,
            channel_axis: int = -1) -> InternalLayerMasked:
  """Average pooling.

  Thin wrapper dispatching to `_Pool` with `_Pooling.AVG`. Based on
  :obj:`jax.example_libraries.stax.AvgPool`.

  Args:
    window_shape: The number of pixels over which pooling is to be performed.
    strides: The stride of the pooling window. `None` corresponds to a stride
      of `(1, ..., 1)`.
    padding: Can be `VALID`, `SAME`, or `CIRCULAR` padding. Here `CIRCULAR`
      uses periodic boundary conditions on the image.
    normalize_edges: `True` to normalize output by the effective receptive
      field, `False` to normalize by the window size. Only has effect at the
      edges when `SAME` padding is used. Set to `True` to retain
      correspondence to `ostax.AvgPool`.
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to
      `-1`, the trailing axis. For `kernel_fn`, channel size is considered
      to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _Pool(
      pool_type=_Pooling.AVG,
      window_shape=window_shape,
      strides=strides,
      padding=padding,
      normalize_edges=normalize_edges,
      batch_axis=batch_axis,
      channel_axis=channel_axis)
@layer
@supports_masking(remask_kernel=True)
def SumPool(window_shape: Sequence[int],
            strides: Optional[Sequence[int]] = None,
            padding: str = Padding.VALID.name,
            batch_axis: int = 0,
            channel_axis: int = -1) -> InternalLayerMasked:
  """Sum pooling.

  Thin wrapper dispatching to `_Pool` with `_Pooling.SUM` (edge
  normalization does not apply to sum pooling, so `normalize_edges=False`).
  Based on :obj:`jax.example_libraries.stax.SumPool`.

  Args:
    window_shape: The number of pixels over which pooling is to be performed.
    strides: The stride of the pooling window. `None` corresponds to a stride
      of `(1, ..., 1)`.
    padding: Can be `VALID`, `SAME`, or `CIRCULAR` padding. Here `CIRCULAR`
      uses periodic boundary conditions on the image.
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to
      `-1`, the trailing axis. For `kernel_fn`, channel size is considered
      to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _Pool(
      pool_type=_Pooling.SUM,
      window_shape=window_shape,
      strides=strides,
      padding=padding,
      normalize_edges=False,
      batch_axis=batch_axis,
      channel_axis=channel_axis)
def _Pool(
    pool_type: _Pooling,
    window_shape: Sequence[int],
    strides: Optional[Sequence[int]],
    padding: str,
    normalize_edges: bool,
    batch_axis: int,
    channel_axis: int) -> InternalLayerMasked:
  """General pooling.

  Based on :obj:`jax.example_libraries.stax.AvgPool` and
  :obj:`jax.example_libraries.stax.SumPool`.

  Args:
    pool_type: specifies whether average or sum pooling should be performed.
      (`Pooling.AVG` or `Pooling.SUM`)
    window_shape: The number of pixels over which pooling is to be performed.
    strides: The stride of the pooling window. `None` corresponds to a stride
      of `(1, ..., 1)`.
    padding: Can be `VALID`, `SAME`, or `CIRCULAR` padding. Here `CIRCULAR`
      uses periodic boundary conditions on the image.
    normalize_edges: `True` to normalize output by the effective receptive
      field, `False` to normalize by the window size. Only has effect at the
      edges when `SAME` padding is used. Set to `True` to retain correspondence
      to `ostax.AvgPool`.
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to `-1`,
      the trailing axis. For `kernel_fn`, channel size is considered to be
      infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  strides = strides or (1,) * len(window_shape)
  padding = Padding(padding)

  if pool_type == _Pooling.AVG:
    pool_fn = ostax.AvgPool
  elif pool_type == _Pooling.SUM:
    pool_fn = ostax.SumPool
  else:
    raise ValueError('Invalid pooling type {}'.format(pool_type))

  # Build a stax dimension spec string: spatial axes get arbitrary letters,
  # then 'N' / 'C' are inserted at `batch_axis` / `channel_axis`.
  spec = ''.join(c for c in string.ascii_uppercase
                 if c not in ('N', 'C'))[:len(strides)]
  for a in sorted((batch_axis, channel_axis % (2 + len(strides)))):
    if a == batch_axis:
      spec = spec[:a] + 'N' + spec[a:]
    else:
      spec = spec[:a] + 'C' + spec[a:]

  if padding == Padding.CIRCULAR:
    # Circular padding: pad the input periodically, then pool with VALID
    # padding (the SAME-padded init_fn is only used for shape inference).
    init_fn, _ = pool_fn(window_shape, strides, Padding.SAME.name, spec)
    _, apply_fn_0 = pool_fn(window_shape, strides, Padding.VALID.name, spec)

    def apply_fn(params, inputs, **kwargs):
      non_spatial_axes = (batch_axis, channel_axis % inputs.ndim)
      spatial_axes = tuple(i for i in range(inputs.ndim)
                           if i not in non_spatial_axes)
      inputs = _same_pad_for_filter_shape(inputs, window_shape, strides,
                                          spatial_axes)
      res = apply_fn_0(params, inputs, **kwargs)
      return res

  elif normalize_edges or pool_type == _Pooling.SUM:
    init_fn, apply_fn = pool_fn(window_shape, strides, padding.name, spec)

  else:
    # AVG pooling without edge normalization: always divide by the full
    # window size, even at the edges.
    def rescaler(dims, strides, padding):
      del dims, strides, padding  # Unused.
      return lambda outputs, inputs, spec: outputs / onp.prod(window_shape)

    pool_fn = _pooling_layer(lax.add, 0., rescaler)
    init_fn, apply_fn = pool_fn(window_shape, strides, padding.name, spec)

  @requires(batch_axis=batch_axis,
            channel_axis=channel_axis,
            diagonal_spatial=Diagonal(input=Bool.MAYBE))
  def kernel_fn(k: Kernel, **kwargs) -> Kernel:
    """Kernel transformation."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk

    if k.diagonal_spatial:
      window_shape_kernel = window_shape
      strides_kernel = strides
    else:
      # Full spatial kernels carry each spatial axis twice (zipped pairs),
      # possibly in reversed orientation.
      window_shape_kernel = utils.double_tuple(
          window_shape[::(-1 if k.is_reversed else 1)])
      strides_kernel = utils.double_tuple(strides[::(-1 if k.is_reversed else 1)])

    def pool(mat, batch_ndim):
      if mat is None or mat.ndim == 0:
        return mat
      out = _pool_kernel(mat, pool_type, window_shape_kernel, strides_kernel,
                         padding, normalize_edges, batch_ndim)
      if k.diagonal_spatial and pool_type == _Pooling.AVG:
        # Diagonal kernels pool each spatial axis once, so an extra
        # normalization pass is needed for averaging.
        _window_shape = (1,) * batch_ndim + tuple(window_shape)
        _strides = (1,) * batch_ndim + tuple(strides)
        out = _normalize(mat, out, normalize_edges, padding, _strides,
                         _window_shape)
      return out

    nngp = pool(nngp, 2)
    ntk = pool(ntk, 2)
    cov1 = pool(cov1, 1 if k.diagonal_batch else 2)
    cov2 = pool(cov2, 1 if k.diagonal_batch else 2)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)

  def mask_fn(mask, input_shape):
    _check_is_implemented(mask, channel_axis)
    return _pool_mask(mask, window_shape, strides, padding,
                      batch_axis, channel_axis)

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def GlobalSumPool(
    batch_axis: int = 0,
    channel_axis: int = -1
) -> InternalLayerMasked:
  """Global sum pooling.

  Thin wrapper dispatching to `_GlobalPool` with `_Pooling.SUM`: sums over
  and removes (`keepdims=False`) all spatial dimensions, preserving the
  order of batch and channel axes.

  Args:
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to
      `-1`, the trailing axis. For `kernel_fn`, channel size is considered
      to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _GlobalPool(
      pool_type=_Pooling.SUM,
      batch_axis=batch_axis,
      channel_axis=channel_axis)
@layer
@supports_masking(remask_kernel=False)
def GlobalAvgPool(
    batch_axis: int = 0,
    channel_axis: int = -1
) -> InternalLayerMasked:
  """Global average pooling.

  Thin wrapper dispatching to `_GlobalPool` with `_Pooling.AVG`: averages
  over and removes (`keepdims=False`) all spatial dimensions, preserving
  the order of batch and channel axes.

  Args:
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to
      `-1`, the trailing axis. For `kernel_fn`, channel size is considered
      to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return _GlobalPool(
      pool_type=_Pooling.AVG,
      batch_axis=batch_axis,
      channel_axis=channel_axis)
def _GlobalPool(
    pool_type: _Pooling,
    batch_axis: int,
    channel_axis: int
) -> InternalLayerMasked:
  """General global pooling.

  Pools over and removes (`keepdims=False`) all spatial dimensions, preserving
  the order of batch and channel axes.

  Args:
    pool_type: specifies whether average or sum pooling should be performed.
      (`Pooling.AVG` or `Pooling.SUM`).
    batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
      axis.
    channel_axis: Specifies the channel / feature dimension. Defaults to `-1`,
      the trailing axis. For `kernel_fn`, channel size is considered to be
      infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  if pool_type == _Pooling.AVG:
    # Masked mean over the given axes (mask-aware via `mean_and_var`).
    pool_fn = lambda x, axis, mask: mean_and_var(x, axis, mask=mask)[0]
  elif pool_type == _Pooling.SUM:
    # Plain sum; the mask argument is ignored here.
    pool_fn = lambda x, axis, mask: np.sum(x, axis)
  else:
    raise ValueError(f'Invalid pooling type {pool_type}.')

  def init_fn(rng, input_shape):
    # Output keeps only the batch and channel dimensions, in original order.
    ndim = len(input_shape)
    non_spatial_axes = (batch_axis % ndim, channel_axis % ndim)
    output_shape = tuple(input_shape[i] for i in range(ndim)
                         if i in non_spatial_axes)
    return output_shape, ()

  def apply_fn(params, inputs, mask=None, **kwargs):
    non_spatial_axes = (batch_axis % inputs.ndim, channel_axis % inputs.ndim)
    spatial_axes = tuple(i for i in range(inputs.ndim)
                         if i not in non_spatial_axes)
    out = pool_fn(inputs, spatial_axes, mask)
    return out

  @requires(batch_axis=batch_axis,
            channel_axis=channel_axis,
            diagonal_spatial=Diagonal(input=Bool.MAYBE, output=Bool.YES))
  def kernel_fn(k: Kernel, **kwargs):
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk

    def _pool(mat, batch_ndim, mask=None):
      if mat is None:
        return mat
      spatial_axes = tuple(range(batch_ndim, mat.ndim))
      out = pool_fn(mat, axis=spatial_axes, mask=mask)
      if k.diagonal_spatial and pool_type == _Pooling.AVG:
        # Diagonal kernels represent each spatial axis once, while the
        # infinite-width average pools it twice — divide once more.
        out /= utils.size_at(mat, spatial_axes)
      return out

    mask11, mask12, mask22 = k._get_mask_prods(k.mask1, k.mask2)

    cov1 = _pool(cov1, 1 if k.diagonal_batch else 2, mask11)
    cov2 = _pool(cov2, 1 if k.diagonal_batch else 2, mask22)
    nngp = _pool(nngp, 2, mask12)
    ntk = _pool(ntk, 2, mask12)

    # All spatial axes are gone; relabel batch/channel axes to 0/1 while
    # preserving their relative order.
    ndim = len(k.shape1)
    batch_first = batch_axis % ndim < channel_axis % ndim
    return k.replace(cov1=cov1,
                     nngp=nngp,
                     cov2=cov2,
                     ntk=ntk,
                     batch_axis=0 if batch_first else 1,
                     channel_axis=1 if batch_first else 0,
                     is_reversed=False)

  def mask_fn(mask, input_shape):
    _check_is_implemented(mask, channel_axis)
    non_spatial_axes = (batch_axis % mask.ndim, channel_axis % mask.ndim)
    spatial_axes = tuple(i for i in range(mask.ndim)
                         if i not in non_spatial_axes)
    # Pooling mixes all spatial positions, so the output is masked only if
    # every spatial position is masked.
    return np.all(mask, spatial_axes)

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def Flatten(
    batch_axis: int = 0,
    batch_axis_out: int = 0
) -> InternalLayerMasked:
  """Flattening all non-batch dimensions.
  Based on :obj:`jax.example_libraries.stax.Flatten`, but allows to specify
  batch axes.
  Args:
    batch_axis:
      Specifies the input batch dimension. Defaults to `0`, the leading axis.
    batch_axis_out:
      Specifies the output batch dimension. Defaults to `0`, the leading axis.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # The output is always 2D, `(batch, channel)` or `(channel, batch)`;
  # normalize `batch_axis_out` to 0 or 1 accordingly.
  if batch_axis_out in (0, -2):
    batch_axis_out = 0
    channel_axis_out = 1
  elif batch_axis_out in (1, -1):
    batch_axis_out = 1
    channel_axis_out = 0
  else:
    raise ValueError(
        f'`batch_axis_out` must be 0 or 1, got {batch_axis_out}.')
  def get_output_shape(input_shape):
    # 2D output shape: batch size, and the product of all other dimensions.
    batch_size = input_shape[batch_axis]
    channel_size = functools.reduce(
        op.mul,
        # `(batch_axis + 1) or len(input_shape)` makes `batch_axis == -1`
        # produce an empty tail slice instead of the full shape.
        input_shape[:batch_axis] + input_shape[(batch_axis + 1)
                                               or len(input_shape):],
        1
    )
    if batch_axis_out == 0:
      return batch_size, channel_size
    return channel_size, batch_size
  def init_fn(rng, input_shape):
    # No trainable parameters; only the output shape is computed.
    output_shape = get_output_shape(input_shape)
    return output_shape, ()
  def apply_fn(params, inputs, **kwargs):
    output_shape = get_output_shape(inputs.shape)
    # `-batch_axis_out` sends the batch axis to position 0 (batch-first
    # output) or -1 (batch-last output) ahead of the C-order reshape.
    inputs = np.moveaxis(inputs, batch_axis, -batch_axis_out)
    return inputs.reshape(output_shape)
  @requires(batch_axis=batch_axis,
            channel_axis=None,
            diagonal_spatial=Diagonal(output=Bool.YES))
  def kernel_fn(k: Kernel, **kwargs):
    """Compute kernels."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    def trace(x, batch_ndim):
      # Average the kernel over all spatial dimensions, leaving only the
      # `batch_ndim` leading batch axes: a plain mean when the kernel stores
      # only per-location (diagonal) spatial entries, otherwise repeated
      # normalized traces over trailing spatial axis pairs.
      if x is None:
        return x
      if k.diagonal_spatial:
        spatial_axes = tuple(range(x.ndim)[batch_ndim:])
        x = np.mean(x, spatial_axes)
      else:
        while x.ndim > batch_ndim:
          x = np.trace(x, axis1=-2, axis2=-1) / x.shape[-1]
      return x
    cov1 = trace(cov1, 1 if k.diagonal_batch else 2)
    cov2 = trace(cov2, 1 if k.diagonal_batch else 2)
    nngp = trace(nngp, 2)
    ntk = trace(ntk, 2)
    return k.replace(cov1=cov1,
                     nngp=nngp,
                     cov2=cov2,
                     ntk=ntk,
                     is_gaussian=False,
                     is_reversed=False,
                     batch_axis=batch_axis_out,
                     channel_axis=channel_axis_out,
                     diagonal_spatial=False)
  def mask_fn(mask, input_shape):
    # NOTE(review): `apply_fn` moves the batch axis to `-batch_axis_out`
    # (i.e. -1 when `batch_axis_out == 1`), while here it is moved to
    # `batch_axis_out` (i.e. 1) — confirm the two flattening orders agree
    # for inputs with more than one non-batch axis.
    mask = np.broadcast_to(mask, input_shape)
    output_shape = get_output_shape(mask.shape)
    return np.moveaxis(mask, batch_axis, batch_axis_out).reshape(output_shape)
  return init_fn, apply_fn, kernel_fn, mask_fn
class PositionalEmbedding(enum.Enum):
  """Kind of positional embeddings used by a :obj:`GlobalSelfAttention` layer.

  Attributes:
    NONE:
      do not use positional embeddings.
    SUM:
      add positional embeddings to the activations.
    CONCAT:
      append positional embeddings to the activations along the channel axis.
  """
  NONE = 'NONE'
  SUM = 'SUM'
  CONCAT = 'CONCAT'
class AttentionMechanism(enum.Enum):
  """Nonlinearity applied to attention weights in :obj:`GlobalSelfAttention`.

  Attributes:
    SOFTMAX:
      attention weights are computed by passing the dot product between keys
      and queries through :obj:`jax.nn.softmax`.
    IDENTITY:
      attention weights are the dot product between keys and queries.
    ABS:
      attention weights are computed by passing the dot product between keys
      and queries through :obj:`jax.numpy.abs`.
    RELU:
      attention weights are computed by passing the dot product between keys
      and queries through :obj:`jax.nn.relu`.
  """
  SOFTMAX = 'SOFTMAX'
  IDENTITY = 'IDENTITY'
  ABS = 'ABS'
  RELU = 'RELU'

  def fn(self):
    """Return the callable implementing this attention nonlinearity."""
    if self is AttentionMechanism.SOFTMAX:
      return ostax.softmax
    if self is AttentionMechanism.IDENTITY:
      return lambda x: x
    if self is AttentionMechanism.ABS:
      return np.abs
    # RELU.
    return lambda x: np.maximum(x, 0.)
@layer
@supports_masking(remask_kernel=True)
def GlobalSelfAttention(
    n_chan_out: int,
    n_chan_key: int,
    n_chan_val: int,
    n_heads: int,
    linear_scaling: bool = True,
    W_key_std: float = 1.0,
    W_value_std: float = 1.0,
    W_query_std: float = 1.0,
    W_out_std: float = 1.0,
    b_std: Optional[float] = None,
    attention_mechanism: str = AttentionMechanism.SOFTMAX.name,
    pos_emb_type: str = PositionalEmbedding.NONE.name,
    pos_emb_p_norm: float = 2,
    pos_emb_decay_fn: Optional[Callable[[float], float]] = None,
    n_chan_pos_emb: Optional[int] = None,
    W_pos_emb_std: float = 1.0,
    val_pos_emb: bool = False,
    batch_axis: int = 0,
    channel_axis: int = -1) -> InternalLayerMasked:
  """Global scaled dot-product self-attention.
  Infinite width results based on
  "`Infinite attention: NNGP and NTK for deep attention networks
  <https://arxiv.org/abs/2006.10540>`_".
  Two versions of attention are available (the version to be used is
  determined by the argument `linear_scaling`):
  1. `False`: this is the standard scaled dot-product attention, i.e.,
  the dot product between keys and queries is scaled by the squared root
  of their dimension. The expression for `nngp`/`ntk` involves an integral
  with no known closed form and thus call to `kernel_fn` results in an error.
  2. `True`: scaling the dot products between keys and queries by their
  dimension instead of the square root of the same quantity, AND tying the key
  and query weight matrices. This makes the `nngp`/`ntk` analytically tractable
  but for the price that, unlike in the `False` case, the dot products of keys
  and queries converge to a constant. Because this constant would be zero
  if the key and query weights were independent, the variant where these
  two weight matrices are tied was implemented resulting in non-constant
  attention weights.
  The final computation for single head is then
  `f_h (x) + attention_mechanism(<scaling> Q(x) K(x)^T) V(x)`
  and the output of this layer is computed as
  `f(x) = concat[f_1(x) , ... , f_{<n_{heads}>} (x)] W_{out} + b`
  where the shape of `b` is `(n_chan_out,)`, i.e., single bias per channel.
  The `kernel_fn` computes the limiting kernel of the outputs of this layer
  as the number of heads and the number of feature dimensions of keys/queries
  goes to infinity.
  For details, please see "`Infinite attention: NNGP and NTK for deep attention
  networks <https://arxiv.org/abs/2006.10540>`_".
  Args:
    n_chan_out:
      number of feature dimensions of outputs.
    n_chan_key:
      number of feature dimensions of keys/queries.
    n_chan_val:
      number of feature dimensions of values.
    n_heads:
      number of attention heads.
    linear_scaling:
      if `True`, the dot products between keys and queries are scaled by
      `1 / n_chan_key` and the key and query weight matrices are tied;
      if `False`, the dot products are scaled by `1 / sqrt(n_chan_key)` and
      the key and query matrices are independent.
    W_key_std:
      init standard deviation of the key weights values. Due to NTK
      parameterization, influences computation only through the product
      `W_key_std * W_query_std`.
    W_value_std:
      init standard deviation of the value weights values. Due to NTK
      parameterization, influences computation only through the product
      `W_out_std * W_value_std`.
    W_query_std:
      init standard deviation of the query weights values; if `linear_scaling`
      is `True` (and thus key and query weights are tied - see above) then keys
      are computed with `WK = W_key_std * W / sqrt(n_chan_in)` and queries are
      computed with `WQ = W_query_std * W / sqrt(n_chan_in)` weight matrices.
      Due to NTK parameterization, influences computation only through the
      product `W_key_std * W_query_std`.
    W_out_std:
      initial standard deviation of the output weights values. Due to NTK
      parameterization, influences computation only through the product
      `W_out_std * W_value_std`.
    b_std:
      initial standard deviation of the bias values. `None` means no bias.
    attention_mechanism:
      a string, `"SOFTMAX"`, `"IDENTITY"`, `"ABS"`, or `"RELU"`, the
      transformation applied to dot product attention weights.
    pos_emb_type:
      a string, `"NONE"`, `"SUM"`, or `"CONCAT"`, the type of positional
      embeddings to use. In the infinite-width limit, `"SUM"` and `"CONCAT"`
      are equivalent up to a scaling constant. Keep in mind that all `Dense`
      sub-layers of the attention layer use the NTK parameterization, and weight
      variances are always inversely proportional to the input channel size,
      which leads to different effective variances when using `"SUM"` and
      `"CONCAT"` embeddings, even if all variance scales like `W_key_std` etc.
      are the same.
    pos_emb_p_norm:
      use the unnormalized L-`p` distance to the power of `p` (with
      `p == pos_emb_p_norm`) to compute pairwise distances for positional
      embeddings (see `pos_emb_decay_fn` for details). Used only if
      `pos_emb_type != "NONE"` and `pos_emb_decay_fn is not None`.
    pos_emb_decay_fn:
      a function applied to the L-`p` distance to the power of `p` (with
      `p == pos_emb_p_norm`) distance between two spatial positions to produce
      the positional embeddings covariance matrix (e.g. power decay,
      exponential decay, etc.). `None` is equivalent to an indicator function
      `lambda d: d == 0`, and returns a diagonal covariance matrix. Used only
      if `pos_emb_type != "NONE"`.
    n_chan_pos_emb:
      number of channels in positional embeddings. `None` means use the same
      number of channels as in the layer inputs. Can be used to tune the
      contribution of positional embeddings relative to contribution of inputs
      if `pos_emb_type == "CONCAT"`. Used only if `pos_emb_type != "NONE"`.
      Will trigger an error if `pos_emb_type == "SUM"` and `n_chan_pos_emb` is
      not `None` or does not match the layer inputs channel size at runtime.
    W_pos_emb_std:
      init standard deviation of the random positional embeddings. Can be used
      to tune the contribution of positional embeddings relative to the
      contribution of inputs. Used only if `pos_emb_type != "NONE"`. To tune
      the _relative_ (to the inputs) contribution, you can either use
      `n_chan_pos_emb` when `pos_emb_type == "CONCAT"`, or, if
      `pos_emb_type == "CONCAT"`, adjust `W_key_std` etc. relative to
      `W_pos_emb_std`, to keep the total output variance fixed.
    val_pos_emb:
      `True` indicates using positional embeddings when computing all of the
      keys/queries/values matrices, `False` makes them only used for keys and
      queries, but not values. Used only if `pos_emb_type != "NONE"`.
    batch_axis:
      Specifies the batch dimension. Defaults to `0`, the leading axis.
    channel_axis:
      Specifies the channel / feature dimension. Defaults to `-1`, the trailing
      axis. For `kernel_fn`, channel size is considered to be infinite.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  Raises:
    NotImplementedError: If `linear_scaling` is `False`, calling `kernel_fn`
      will result in an error as there is no known analytic expression for the
      kernel for `attention_mechanism != "IDENTITY"`.
    NotImplementedError: If `apply_fn` is called with `pos_emb_decay_fn != None`
      , since custom `pos_emb_decay_fn` is only implemented in the infinite
      width regime currently.
  """
  # Per the docstring, only these two products affect the computation.
  QK_std = W_query_std * W_key_std
  OV_std = W_out_std * W_value_std
  pos_emb_type = PositionalEmbedding(pos_emb_type)
  attention_mechanism = AttentionMechanism(attention_mechanism)
  # Cholesky factor `L` of the positional-embedding covariance `R`. Cached
  # (size 1) since it depends only on the spatial shape.
  @functools.lru_cache(1)
  def get_pos_emb_L(spatial_shape):
    with jax.core.eval_context():
      size = utils.size_at(spatial_shape)
      R = _pos_emb_pdist(spatial_shape, pos_emb_p_norm, pos_emb_decay_fn)
      R = utils.unzip_axes(R)
      L = np.linalg.cholesky(np.reshape(R, (size,) * 2)).reshape(R.shape)
      return L
  def init_fn(rng, input_shape):
    _channel_axis = channel_axis % len(input_shape)
    output_shape = (input_shape[:_channel_axis] + (n_chan_out,) +
                    input_shape[_channel_axis + 1:])
    rng_Q, rng_K, rng_V, rng_O, rng_b, rng_pe = random.split(rng, 6)
    rand = random.normal
    n_chan_in_keys = n_chan_in_vals = input_shape[channel_axis]
    # Generate and add / append positional embeddings.
    if pos_emb_type == PositionalEmbedding.NONE:
      pos_emb = None
    else:
      # `None` means positional embeddings have the same number of channels
      # as inputs.
      _n_chan_pos_emb = (n_chan_in_keys if n_chan_pos_emb is None
                         else n_chan_pos_emb)
      pos_emb_shape = list(input_shape)
      pos_emb_shape[channel_axis] = _n_chan_pos_emb
      # Single shared embedding per batch (broadcast over the batch axis).
      pos_emb_shape[batch_axis] = 1
      pos_emb = rand(rng_pe, shape=pos_emb_shape)
      if pos_emb_type == PositionalEmbedding.CONCAT:
        n_chan_in_keys += _n_chan_pos_emb
        if val_pos_emb:
          n_chan_in_vals += _n_chan_pos_emb
    key_matrices = rand(rng_K, shape=(n_heads, n_chan_in_keys, n_chan_key))
    val_matrices = rand(rng_V, shape=(n_heads, n_chan_in_vals, n_chan_val))
    W_out = rand(rng_O, shape=(n_chan_val * n_heads, n_chan_out))
    if b_std is None:
      b = None
    else:
      # Single bias per channel, broadcast over batch and spatial axes.
      b_shape = [1] * len(input_shape)
      b_shape[_channel_axis] = n_chan_out
      b = rand(rng_b, shape=b_shape)
    if linear_scaling:
      # Keys and queries share a weight matrix in the linear-scaling variant.
      query_matrices = None
      warnings.warn('Linear scaling attention used -> query initialization '
                    'ignored, tying the weights '
                    '(see docstring for more details).')
    else:
      query_matrices = rand(rng_Q, (n_heads, n_chan_in_keys, n_chan_key))
    return (output_shape,
            (query_matrices, key_matrices, val_matrices, W_out, b, pos_emb))
  def apply_fn(params: PyTree,
               inputs: np.ndarray,
               mask: Optional[np.ndarray] = None,
               **kwargs) -> np.ndarray:
    query_matrices, key_matrices, val_matrices, W_out, b, pos_emb = params
    spatial_shape, spatial_axes = _shape_and_axes(inputs.shape,
                                                 (batch_axis, channel_axis))
    n = inputs.shape[batch_axis]
    if pos_emb is not None:
      # Generate positional embeddings.
      if pos_emb_decay_fn is not None:
        # Correlate the i.i.d. embeddings spatially via the Cholesky factor.
        L = get_pos_emb_L(spatial_shape)
        first = tuple(range(L.ndim // 2))
        last = tuple(range(L.ndim // 2, L.ndim))
        pos_emb = np.tensordot(L, pos_emb, (last, spatial_axes))
        pos_emb = np.moveaxis(pos_emb, first, spatial_axes)
      # Mask positional embeddings.
      if mask is not None:
        pos_emb = np.where(mask, np.zeros((), pos_emb.dtype), pos_emb)
      pos_emb *= W_pos_emb_std
    # Add / concat positional embeddings.
    if pos_emb_type == PositionalEmbedding.SUM:
      # `inputs_val` keeps an embedding-free copy for values when
      # `val_pos_emb` is `False`.
      inputs_val = None if val_pos_emb else inputs
      inputs = pos_emb + inputs
    elif pos_emb_type == PositionalEmbedding.CONCAT:
      inputs_val = inputs if not val_pos_emb else None
      _n_chan_pos_emb = (inputs.shape[channel_axis] if n_chan_pos_emb is None
                         else n_chan_pos_emb)
      _channel_axis = channel_axis % inputs.ndim
      pos_emb = np.broadcast_to(
          pos_emb,
          inputs.shape[:_channel_axis] + (_n_chan_pos_emb,) +
          inputs.shape[_channel_axis + 1:])
      inputs = np.concatenate([inputs, pos_emb], axis=channel_axis)
    elif pos_emb_type == PositionalEmbedding.NONE:
      inputs_val = None
    # Prepare separate inputs for values if asked to not add positional
    # embeddings to values.
    if inputs_val is not None:
      inputs_val = np.moveaxis(inputs_val, (batch_axis, channel_axis), (0, -1))
      inputs_val = inputs_val.reshape((n, -1, inputs_val.shape[-1]))
    # Flatten all spatial dimensions and make input of shape
    # `(batch_size, total_spatial_size, n_channels)`.
    inputs = np.moveaxis(inputs, (batch_axis, channel_axis), (0, -1))
    inputs = inputs.reshape((n, -1, inputs.shape[-1]))
    def _inputs_dot(matrices, _inputs=inputs):
      # Contract inputs with per-head weights; heads become the leading axis.
      ret = np.dot(_inputs, matrices)
      return np.moveaxis(ret, 2, 0)
    # Drop positional embedding information for value matrices if requested.
    if inputs_val is not None:
      values = _inputs_dot(val_matrices, inputs_val)
      n_chan_in = inputs_val.shape[-1]
    else:
      values = _inputs_dot(val_matrices)
      n_chan_in = inputs.shape[-1]
    keys = _inputs_dot(key_matrices)
    if linear_scaling:
      queries = keys
    else:
      queries = _inputs_dot(query_matrices)
    G_mat = np.matmul(queries, np.moveaxis(keys, -1, -2))
    # Exponent 1 vs 0.5 selects the `1 / n_chan_key` (linear scaling) vs
    # `1 / sqrt(n_chan_key)` (standard) scaling; the `inputs.shape[-1]`
    # factor comes from the NTK parameterization of the key/query weights.
    norm = inputs.shape[-1] * n_chan_key ** (1 if linear_scaling else 0.5)
    G_mat *= QK_std / norm
    if mask is not None:
      mask = np.all(mask, axis=channel_axis, keepdims=True)
      mask = np.moveaxis(mask, (batch_axis, channel_axis), (0, -1))
      mask = mask.reshape((1, mask.shape[0], 1, -1))
      # Masked positions get `-inf` before softmax (zero attention weight),
      # or `0` for the other mechanisms.
      if attention_mechanism == AttentionMechanism.SOFTMAX:
        G_mat = np.where(mask, _NEG_INF, G_mat)
      elif attention_mechanism in (AttentionMechanism.IDENTITY,
                                   AttentionMechanism.RELU,
                                   AttentionMechanism.ABS):
        G_mat = np.where(mask, np.zeros((), G_mat.dtype), G_mat)
      else:
        raise NotImplementedError(attention_mechanism, mask)
    G_mat = attention_mechanism.fn()(G_mat)
    heads = np.matmul(G_mat, values)
    heads = np.moveaxis(heads, 0, -1)
    heads = np.reshape(heads, heads.shape[:-2] + (-1,))
    outputs = np.matmul(heads, W_out)
    outputs *= OV_std / (n_chan_val * n_heads * n_chan_in) ** 0.5
    outputs = np.reshape(outputs, (n,) + spatial_shape + (n_chan_out,))
    outputs = np.moveaxis(outputs, (0, -1), (batch_axis, channel_axis))
    if b is not None:
      outputs += b_std * b
    return outputs
  @requires(batch_axis=batch_axis,
            channel_axis=channel_axis,
            diagonal_spatial=Diagonal(input=Bool.NO))
  def kernel_fn(k: Kernel, **kwargs):
    # Generate (optional) positional embedding covariances.
    R1, R12, R2 = _get_all_pos_emb(k, pos_emb_type, pos_emb_p_norm,
                                   pos_emb_decay_fn)
    def _get_interpolation_coefficients():
      input_weight, pos_emb_weight = 1, W_pos_emb_std**2
      if pos_emb_type == PositionalEmbedding.CONCAT:
        # Reweight based on relative widths of inputs and channels.
        n_chan_input = k.shape1[channel_axis]
        _n_chan_pos_emb = (k.shape1[channel_axis] if n_chan_pos_emb is None
                           else n_chan_pos_emb)
        n_chan_total = n_chan_input + _n_chan_pos_emb
        input_weight *= n_chan_input / n_chan_total
        pos_emb_weight *= _n_chan_pos_emb / n_chan_total
      return input_weight, pos_emb_weight
    def weighted_sum(x, y, x_weight, y_weight):
      if x is None or y is None:
        return x
      return x_weight * x + y_weight * y
    # Generate kernel interpolations.
    kern_weight, pos_emb_weight = _get_interpolation_coefficients()
    cov1_interp = weighted_sum(k.cov1, R1, kern_weight, pos_emb_weight)
    cov2_interp = weighted_sum(k.cov2, R2, kern_weight, pos_emb_weight)
    if val_pos_emb or (not linear_scaling and
                       attention_mechanism == AttentionMechanism.IDENTITY):
      # These interpolations need to be computed in `d^-1/2` scaling even if
      # positional embeddings aren't used in `values`.
      nngp_interp = weighted_sum(k.nngp, R12, kern_weight, pos_emb_weight)
      ntk_interp = weighted_sum(k.ntk, R12, kern_weight, pos_emb_weight)
    if linear_scaling:
      # With tied key/query weights (see docstring), attention weights become
      # a deterministic function of the input covariance.
      def _get_weighting(mat, mask):
        if mat is None:
          return None
        if not k.diagonal_batch:
          # Only the batch-diagonal of the covariance is needed here.
          mat = np.moveaxis(np.diagonal(mat, axis1=0, axis2=1), -1, 0)
        if mask is not None:
          mask = np.all(mask, axis=channel_axis, keepdims=True)
          mask = np.squeeze(np.moveaxis(mask, (batch_axis, channel_axis),
                                        (0, -1)), -1)
          if k.is_reversed:
            mask = np.moveaxis(mask,
                               range(1, mask.ndim),
                               range(mask.ndim -1, 0, -1))
          mask = utils.interleave_ones(mask, 1, mask.ndim, x_first=False)
          if attention_mechanism == AttentionMechanism.SOFTMAX:
            mat = np.where(mask, _NEG_INF, mat)
          else:
            mat = np.where(mask, np.zeros((), mat.dtype), mat)
        if attention_mechanism == AttentionMechanism.SOFTMAX:
          # Softmax normalizes over every other (key-side) spatial axis.
          axes = tuple(range(mat.ndim))
          return attention_mechanism.fn()(QK_std * mat, axis=axes[2::2])
        else:
          return attention_mechanism.fn()(QK_std * mat)
      def _weigh_kernel(mat, G1, G2=None):
        # Contract `G1 @ mat @ G2^T` over the interleaved spatial axes of the
        # kernel via a single einsum.
        if mat is not None and mat.ndim != 0:
          G2 = G1 if G2 is None else G2
          # Spatial axes
          G1_dims = tuple(range(1, G1.ndim))
          G2_dims = tuple(range(G1.ndim, G1.ndim + G2.ndim - 1))
          mat_dims = utils.zip_flat(G1_dims[1::2], G2_dims[1::2])
          res_dims = utils.zip_flat(G1_dims[::2], G2_dims[::2])
          G1_dims = (0,) + G1_dims
          # Batch axes
          if mat.ndim % 2:  # Even number of spatial axes + 1 or 2 batch axes
            G2_dims = (0,) + G2_dims
            mat_dims = (0,) + mat_dims
            res_dims = (0,) + res_dims
          else:
            G2_dims = (-1,) + G2_dims
            mat_dims = (0, -1) + mat_dims
            res_dims = (0, -1) + res_dims
          mat = np.einsum(G1, G1_dims, mat, mat_dims, G2, G2_dims, res_dims,
                          optimize=True)
        return _affine(mat, OV_std, b_std)
      G1 = _get_weighting(cov1_interp, k.mask1)
      G2 = _get_weighting(cov2_interp, k.mask2)
      cov1 = _weigh_kernel(cov1_interp if val_pos_emb else k.cov1, G1)
      cov2 = _weigh_kernel(cov2_interp if val_pos_emb else k.cov2, G2)
      nngp = _weigh_kernel(nngp_interp if val_pos_emb else k.nngp, G1, G2)
      if k.ntk is None:
        ntk = None
      else:
        ntk = _weigh_kernel(ntk_interp if val_pos_emb else k.ntk,
                            G1, G2) + 2 * (nngp if b_std is None
                                           else (nngp - b_std**2))
    elif attention_mechanism == AttentionMechanism.IDENTITY:
      def dot(lhs, rhs, diagonal_batch=k.diagonal_batch):
        # Contract over all channel (non-batch) axes, keeping them as
        # broadcastable singleton dimensions in the result.
        if lhs is None:
          return None
        c_axes = tuple(range(1 if diagonal_batch else 2, lhs.ndim))
        if rhs is None:
          return np.sum(lhs**2, axis=c_axes, keepdims=True)
        rhs = np.broadcast_to(rhs, lhs.shape)
        b_axes = (0,) if diagonal_batch else (0, 1)
        res = lax.dot_general(lhs, rhs, ((c_axes, c_axes), (b_axes, b_axes)))
        return res.reshape(res.shape + (1,) * len(c_axes))
      dot11 = dot(cov1_interp, None if val_pos_emb else k.cov1)
      dot12 = dot(nngp_interp, None if val_pos_emb else k.nngp, False)
      dot22 = dot(cov2_interp, None if val_pos_emb else k.cov2)
      std = QK_std * OV_std
      nngp = _affine(dot12 * nngp_interp, std, b_std)
      cov1 = _affine(dot11 * cov1_interp, std, b_std)
      cov2 = _affine(None if dot22 is None else dot22 * cov2_interp, std, b_std)
      if ntk_interp is not None:
        if val_pos_emb or pos_emb_type == PositionalEmbedding.NONE:
          nngp_dot_ntk = dot(nngp_interp, ntk_interp, False)
          ntk = 2 * nngp_dot_ntk
        else:
          nngp_dot_ntk_1 = dot(nngp_interp, k.ntk, False)
          nngp_dot_ntk_2 = dot(k.nngp, ntk_interp, False)
          ntk = (nngp_dot_ntk_1 + nngp_dot_ntk_2)
        ntk = _affine(
            ntk * nngp_interp + dot12 * (ntk_interp + 4 * nngp_interp),
            std,
            b_std)
      else:
        ntk = None
    else:
      raise NotImplementedError(f'No known closed form expression for square '
                                f'root scaling and {attention_mechanism} '
                                f'attention mechanism.')
    return k.replace(cov1=cov1,
                     nngp=nngp,
                     cov2=cov2,
                     ntk=ntk,
                     is_gaussian=True)
  def mask_fn(mask, input_shape):
    # An output position is masked iff all of its input channels are masked.
    return np.all(mask, channel_axis, keepdims=True)
  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def LayerNorm(
    axis: Axes = -1,
    eps: float = 1e-12,
    batch_axis: int = 0,
    channel_axis: int = -1) -> InternalLayer:
  """Layer normalisation.
  Args:
    axis:
      dimensions over which to normalize.
    eps:
      (small) positive constant to be added to the variance estimates in order
      to prevent division by zero.
    batch_axis:
      batch dimension. Defaults to `0`, the leading axis.
    channel_axis:
      channel / feature dimension. Defaults to `-1`, the trailing axis. For
      `kernel_fn`, channel size is considered to be infinite.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def init_fn(rng, input_shape):
    # No trainable parameters; the shape is unchanged.
    return input_shape, ()
  def apply_fn(params, inputs, mask=None, **kwargs):
    # Standardize inputs over `axis` (mean 0, variance 1, up to `eps`).
    _axis = utils.canonicalize_axis(axis, inputs)
    mean, var = mean_and_var(inputs, _axis, keepdims=True, mask=mask,
                             get_var=True)
    return (inputs - mean) / np.sqrt(eps + var)
  @requires(batch_axis=batch_axis, channel_axis=channel_axis)
  def kernel_fn(k: Kernel, **kwargs):
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    if not k.is_gaussian:
      raise NotImplementedError('LayerNorm only implemented for Gaussian '
                                'inputs.')
    ndim = len(k.shape1)
    _channel_axis = channel_axis % ndim
    _batch_axis = batch_axis % ndim
    _axis = utils.canonicalize_axis(axis, k.shape1)
    if _channel_axis not in _axis:
      # Fixed message: previously the adjacent f-strings were missing a
      # separating space ("...)necessary...").
      raise ValueError(f'Normalisation over channels (axis {_channel_axis}) '
                       f'necessary for convergence to an asymptotic kernel; '
                       f'got axis={_axis}.')
    _axis.remove(_channel_axis)
    spatial_axes = tuple(i for i in range(len(k.shape1))
                         if i not in (_channel_axis, batch_axis))
    # Batch axis
    if _batch_axis in _axis:
      kernel_axis = (0,)
      _axis.remove(_batch_axis)
    else:
      kernel_axis = ()
    # Spatial axes: map input-space axes to their positions in the kernel
    # (which may be spatially reversed).
    kernel_axis += tuple(
        1 + spatial_axes[::(-1 if k.is_reversed else 1)].index(i)
        for i in _axis)
    # Prepare masks for normalization
    def prepare_mask(m):
      if m is None:
        return m
      if m.shape[channel_axis] != 1:
        # Fixed message: previously the adjacent literals were missing a
        # separating space ("...masksnot implemented...").
        raise NotImplementedError('`LayerNorm` with different per-channel '
                                  'masks not implemented in the infinite '
                                  'limit.')
      m = np.squeeze(m, channel_axis)
      if k.is_reversed:
        m = np.moveaxis(m, range(1, m.ndim), range(m.ndim - 1, 0, -1))
      return m
    # Outer products of the (eps-regularized) variances over the normalized
    # axes, used to rescale all kernels below.
    prod11, prod12, prod22 = get_diagonal_outer_prods(
        eps + cov1,
        cov2 if cov2 is None else eps + cov2,
        k.diagonal_batch,
        k.diagonal_spatial,
        op.mul,
        axis=kernel_axis,
        mask1=prepare_mask(k.mask1),
        mask2=prepare_mask(k.mask2),
    )
    nngp /= np.sqrt(prod12)
    if ntk is not None:
      ntk /= np.sqrt(prod12)
    cov1 /= np.sqrt(prod11)
    if cov2 is not None:
      cov2 /= np.sqrt(prod22)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=False)
def Dropout(rate: float, mode: str = 'train') -> InternalLayer:
  """Dropout layer.
  Based on :obj:`jax.example_libraries.stax.Dropout`.
  Args:
    rate:
      Specifies the keep `rate`, e.g. `rate=1` is equivalent to keeping all
      neurons.
    mode:
      Either `"train"` or `"test"`.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # Validate arguments before delegating to the underlying stax layer.
  if mode not in ('test', 'train'):
    raise ValueError('The `mode` must be either "test" or "train".')
  if rate <= 0. or rate > 1.:
    raise ValueError('The `rate` must be > 0. and <= 1.')
  init_fn, apply_fn = ostax.Dropout(rate, mode=mode)
  @requires(use_dropout=True)
  def kernel_fn_train(k: Kernel, **kwargs):
    """kernel_fn for `train` mode."""
    if k.is_input:
      raise ValueError('Dropout cannot be applied to the input layer.')
    # Dropout rescales the diagonal of the kernels by the inverse keep rate.
    factor = 1./rate
    cov1 = _diag_mul(k.cov1, factor, k.diagonal_batch, k.diagonal_spatial)
    cov2 = _diag_mul(k.cov2, factor, k.diagonal_batch, k.diagonal_spatial)
    # Cross-covariances are only rescaled when the two inputs coincide.
    new_factor = np.where(k.x1_is_x2, factor, 1.)
    nngp = _diag_mul(k.nngp, new_factor, False, k.diagonal_spatial)
    ntk = _diag_mul(k.ntk, new_factor, False, k.diagonal_spatial)
    # TODO(xlc): under which condition could we leave `is_gaussian` unchanged?
    return k.replace(cov1=cov1,
                     nngp=nngp,
                     cov2=cov2,
                     ntk=ntk,
                     is_gaussian=False)
  if mode == 'test':
    # At test time the kernel passes through unchanged.
    kernel_fn = lambda k, **kwargs: k
  else:
    kernel_fn = kernel_fn_train
  return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=True)
def ImageResize(
    shape: Sequence[int],
    method: Union[str, jax.image.ResizeMethod],
    antialias: bool = True,
    precision: lax.Precision = lax.Precision.HIGHEST,
    batch_axis: int = 0,
    channel_axis: int = -1
) -> InternalLayerMasked:
  """Image resize function mimicking :obj:`jax.image.resize`.
  Docstring adapted from
  https://jax.readthedocs.io/en/latest/_modules/jax/_src/image/scale.html#resize
  Note two changes:
  1. Only `"linear"` and `"nearest"` interpolation methods are supported;
  2. Set `shape[i]` to `-1` if you want dimension `i` of `inputs` unchanged.
  The `method` argument expects one of the following resize methods:
  `ResizeMethod.NEAREST`, `"nearest"`:
    `Nearest neighbor interpolation`_. The values of `antialias` and `precision`
    are ignored.
  `ResizeMethod.LINEAR`, `"linear"`, `"bilinear"`, `"trilinear"`, `"triangle"`:
    `Linear interpolation`_. If `antialias` is `True`, uses a triangular
    filter when downsampling.
  The following methods are NOT SUPPORTED in `kernel_fn` (only `init_fn` and
  `apply_fn` work):
  `ResizeMethod.CUBIC`, `"cubic"`, `"bicubic"`, `"tricubic"`:
    `Cubic interpolation`_, using the Keys cubic kernel.
  `ResizeMethod.LANCZOS3`, `"lanczos3"`:
    `Lanczos resampling`_, using a kernel of radius 3.
  `ResizeMethod.LANCZOS5`, `"lanczos5"`:
    `Lanczos resampling`_, using a kernel of radius 5.
  .. _Nearest neighbor interpolation:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
  .. _Linear interpolation: https://en.wikipedia.org/wiki/Bilinear_interpolation
  .. _Cubic interpolation: https://en.wikipedia.org/wiki/Bicubic_interpolation
  .. _Lanczos resampling: https://en.wikipedia.org/wiki/Lanczos_resampling
  Args:
    shape:
      the output shape, as a sequence of integers with length equal to
      the number of dimensions of `image`. Note that :func:`resize` does not
      distinguish spatial dimensions from batch or channel dimensions, so this
      includes all dimensions of the image. To leave a certain dimension
      (e.g. batch or channel) unchanged, set the respective entry to `-1`.
      .. note::
        Setting a `shape` entry to the respective size of the `input` also
        works, but will make `kernel_fn` computation much more expensive with
        no benefit. Further, note that `kernel_fn` does not support resizing the
        `channel_axis`, therefore `shape[channel_axis]` should be set to `-1`.
    method:
      the resizing method to use; either a `ResizeMethod` instance or a
      string. Available methods are: `"LINEAR"`, `"NEAREST"`. Other methods
      like `"LANCZOS3"`, `"LANCZOS5"`, `"CUBIC"` only work for `apply_fn`, but
      not `kernel_fn`.
    antialias:
      should an antialiasing filter be used when downsampling? Defaults to
      `True`. Has no effect when upsampling.
    precision:
      `np.einsum` precision.
    batch_axis:
      batch axis for `inputs`. Defaults to `0`, the leading axis.
    channel_axis:
      channel axis for `inputs`. Defaults to `-1`, the trailing axis. For
      `kernel_fn`, channel size is considered to be infinite.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def _shape(input_shape):
    # Replace `-1` entries with the respective input dimension size.
    return tuple(s if s != -1 else input_shape[i] for i, s in enumerate(shape))
  def init_fn(rng, input_shape):
    # No trainable parameters.
    return _shape(input_shape), ()
  def apply_fn(params, x, **kwargs):
    return jax.image.resize(image=x,
                            shape=_shape(x.shape),
                            method=method,
                            antialias=antialias,
                            precision=precision)
  def mask_fn(mask, input_shape):
    """Behavior of interpolation with masking.
    Interpolation (except for "NEAREST") is done in float format:
    https://github.com/google/jax/issues/3811. Float converted back to bool
    rounds up all non-zero elements to `True`, so naively resizing the `mask`
    will mark any output that has at least one contribution from a masked
    input as fully masked. This can lead to mask growing unexpectedly, e.g.
    consider a 5x5 image with a single masked pixel in the center:
    >>> mask = np.array([[0, 0, 0, 0, 0],
    >>>                  [0, 0, 0, 0, 0],
    >>>                  [0, 0, 1, 0, 0],
    >>>                  [0, 0, 0, 0, 0],
    >>>                  [0, 0, 0, 0, 0]], dtype=np.bool_)
    Downsampling this mask to 2x2 will mark all output pixels as masked!
    >>> jax.image.resize(mask, (2, 2), method='bilinear').astype(np.bool_)
    DeviceArray([[ True,  True],
                 [ True,  True]], dtype=bool)
    Therefore, througout `stax` we rather follow the convention of marking
    outputs as masked if they _only_ have contributions from masked elements
    (in other words, we don't let the mask destroy information; let content
    have preference over mask). For this we invert the mask before and after
    resizing, to round up unmasked outputs instead.
    """
    return ~jax.image.resize(image=~mask,
                             shape=_shape(mask.shape),
                             method=method,
                             antialias=antialias,
                             precision=precision).astype(np.bool_)
  batch_axis, channel_axis = utils.mod((batch_axis, channel_axis), shape)
  diagonal_batch = shape[batch_axis] == -1
  diagonal_spatial = Diagonal(
      input=Bool.NO
      if any(shape[i] != -1 for i in range(len(shape))
             if i not in (batch_axis, channel_axis))
      else Bool.YES)
  @requires(batch_axis=batch_axis,
            channel_axis=channel_axis,
            diagonal_batch=diagonal_batch,
            diagonal_spatial=diagonal_spatial)
  def kernel_fn(k: Kernel, **kwargs) -> Kernel:
    # Normalize `method` to a `ResizeMethod`. Bug fix: previously `_method`
    # was only assigned inside the `isinstance(method, str)` branch, so
    # passing a `jax.image.ResizeMethod` instance (allowed by the signature)
    # raised `NameError` below.
    if isinstance(method, str):
      _method = jax.image.ResizeMethod.from_string(method)
    else:
      _method = method
    if _method not in (jax.image.ResizeMethod.LINEAR,
                       jax.image.ResizeMethod.NEAREST):
      raise NotImplementedError(
          f'Only "linear" (`jax.image.ResizeMethod.LINEAR`) and '
          f'"nearest" (`jax.image.ResizeMethod.NEAREST`) interpolation is '
          f'supported in `kernel_fn`, got {_method}.')
    if shape[channel_axis] != -1:
      raise ValueError(f'Resizing the channel axis {channel_axis} is not '
                       f'well-defined in the infinite-width limit. Please '
                       f'either set `shape[channel_axis] = -1` or file '
                       f'an issue describing your use case at '
                       f'https://github.com/google/neural-tangents/issues/new.')
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    diagonal_spatial = k.diagonal_spatial
    def resize(k, shape1, shape2, diagonal_batch):
      # Resize a kernel array: each spatial input axis appears once
      # (`diagonal_spatial`) or twice (once per input) in the kernel, so the
      # target kernel shape is assembled accordingly.
      if k is None or k.ndim == 0:
        return k
      k_shape = (shape1[batch_axis],)
      if not diagonal_batch:
        k_shape += (shape2[batch_axis],)
      for i, (s1, s2) in enumerate(zip(shape1, shape2)):
        if i not in (batch_axis, channel_axis):
          k_shape += (s1,)
          if not diagonal_spatial:
            k_shape += (s2,)
      return jax.image.resize(image=k,
                              shape=k_shape,
                              method=_method,
                              antialias=antialias,
                              precision=precision)
    shape1 = _shape(k.shape1)
    shape2 = _shape(k.shape2)
    k = k.replace(cov1=resize(cov1, shape1, shape1, k.diagonal_batch),
                  nngp=resize(nngp, shape1, shape2, False),
                  cov2=resize(cov2, shape2, shape2, k.diagonal_batch),
                  ntk=resize(ntk, shape1, shape2, False))
    return k
  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def Index(
    idx: utils.SliceType,
    batch_axis: int = 0,
    channel_axis: int = -1
) -> InternalLayerMasked:
  """Index into the array mimicking :class:`numpy.ndarray` indexing.

  Args:
    idx:
      a `slice` object that would result from indexing an array as `x[idx]`.
      To create this object, use the helper object :obj:`Slice`, i.e. pass
      `idx=stax.Slice[1:10, :, ::-1]` (which is equivalent to passing an
      explicit `idx=(slice(1, 10, None), slice(None), slice(None, None, -1)`.

    batch_axis:
      batch axis for `inputs`. Defaults to `0`, the leading axis.

    channel_axis:
      channel axis for `inputs`. Defaults to `-1`, the trailing axis. For
      `kernel_fn`, channel size is considered to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.

  Raises:
    NotImplementedError:
      If the `channel_axis` (infinite width) is indexed
      (except for `:` or `...`) in the kernel regime (`kernel_fn`).

    NotImplementedError:
      If the `batch_axis` is indexed with an integer (as opposed to a tuple or
      slice) in the kernel regime (`kernel_fn`), since the library currently
      requires there always to be `batch_axis` in the kernel regime (while
      indexing with integers removes the respective axis).

    ValueError:
      If `init_fn` is called on a shape with dummy axes (with sizes like `-1`
      or `None`), that are indexed with non-trivial (not `:` or `...`) slices.
      For indexing, the size of the respective axis needs to be specified.

  Example:
    >>> from neural_tangents import stax
    >>> #
    >>> init_fn, apply_fn, kernel_fn = stax.serial(
    >>>     stax.Conv(128, (3, 3)),
    >>>     stax.Relu(),
    >>>     # Select every other element from the batch (leading axis), cropped
    >>>     # to the upper-left 4x4 corner.
    >>>     stax.Index(idx=stax.Slice[::2, :4, :4]),
    >>>     stax.Conv(128, (2, 2)),
    >>>     stax.Relu(),
    >>>     # Select the first row. Notice that the image becomes 1D.
    >>>     stax.Index(idx=stax.Slice[:, 0, ...]),
    >>>     stax.Conv(128, (2,)),
    >>>     stax.GlobalAvgPool(),
    >>>     stax.Dense(10)
    >>> )
  """
  def init_fn(rng, input_shape):
    # Output shape is the result of slicing `input_shape` with `idx`.
    return utils.slice_shape(input_shape, idx), ()

  def apply_fn(params, x, **kwargs):
    # Finite-width forward pass: plain `ndarray` indexing.
    return x[idx]

  def mask_fn(mask, input_shape):
    # The mask is indexed the same way as the inputs.
    return mask[idx]

  @requires(batch_axis=batch_axis, channel_axis=channel_axis)
  def kernel_fn(k: Kernel, **kwargs) -> Kernel:
    # Indexing of kernels is implemented by `Kernel.__getitem__`.
    return k[idx]

  return init_fn, apply_fn, kernel_fn, mask_fn
class _Slice:
  """Indexable helper whose `[]` operator returns the index expression itself."""

  def __getitem__(self, idx: utils.SliceType) -> utils.SliceType:
    # `Slice[1, :, ::2]` hands us the already-constructed tuple of `slice`
    # objects, which is returned unchanged for later use by `Index`.
    return idx


Slice = _Slice()
"""A helper object to pass the slicing index `idx` to the :obj:`Index` layer.

Since we cannot pass slice specifications like `1, :, 2:8:3` as function
arguments, pass `Slice[1, :, 2:8:3] == (1, slice(None), slice(2, 8, 3))`
instead.
"""
# INTERNAL UTILITIES


# Dimension numbers used by the internal covariance convolutions below:
# inputs/outputs are `NCHW`, filters are `OIHW` (see `lax.conv_general_dilated`).
_CONV_KERNEL_DIMENSION_NUMBERS = ('NCHW', 'OIHW', 'NCHW')
def _affine(
mat: Optional[np.ndarray],
W_std: float,
b_std: Optional[float]) -> Optional[np.ndarray]:
"""Get covariances of affine outputs if inputs have covariances `nngp`.
The output is assumed to be `xW + b`, where `x` is the input, `W` is a matrix
of i.i.d. Gaussian weights with std `W_std`, `b` is a vector of i.i.d.
Gaussian biases with std `b_std`.
Args:
mat:
a `np.ndarray` containing sample-[sample-]position[-position] covariances
of inputs.
W_std:
standard deviation of a fully-connected layer weights.
b_std:
standard deviation of a fully-connected layer biases.
`None` means no bias.
Returns:
a `np.ndarray` containing sample-[sample-]position[-position] covariances
of FC outputs. Has the same shape as `nngp`.
"""
if mat is not None:
mat *= W_std**2
if b_std is not None:
mat += b_std**2
return mat
def _same_pad_for_filter_shape(
    x: np.ndarray,
    filter_shape: Sequence[int],
    strides: Sequence[int],
    axes: Sequence[int],
    mode: str = 'wrap',
) -> np.ndarray:
  """Pad `x` so that a `VALID` convolution acts like a `SAME` one.

  Queries `lax` for the `SAME` padding amounts implied by `filter_shape` and
  `strides`, and applies them with `np.pad` (circularly by default,
  `mode='wrap'`). This is how :attr:`Padding.CIRCULAR` is implemented on top
  of :attr:`Padding.VALID`.

  Args:
    x:
      `np.ndarray` to pad, e.g. a 4D `NHWC` image.
    filter_shape:
      tuple of positive integers, the convolutional filters spatial shape (e.g.
      `(3, 3)` for a 2D convolution).
    strides:
      tuple of positive integers, the convolutional spatial strides, e.g.
      `(1, 1)` for a 2D convolution.
    axes:
      tuple of non-negative integers, the spatial axes to apply convolution
      over (e.g. `(1, 2)` for an `NHWC` image).
    mode:
      a string, padding mode, for all options see
      https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html.

  Returns:
    A `np.ndarray` of the same dimensionality as `x`, padded such that a
    `"VALID"` convolution with `filter_shape` over `axes` outputs an array of
    the same shape as `x`.
  """
  spatial_sizes = tuple(np.size(x, axis) for axis in axes)
  spatial_pads = lax.padtype_to_pads(spatial_sizes, filter_shape, strides,
                                     Padding.SAME.name)

  # Non-spatial axes receive no padding.
  pad_per_axis = dict(zip(axes, spatial_pads))
  pads = [pad_per_axis.get(axis, (0, 0)) for axis in range(x.ndim)]
  return np.pad(x, pads, mode)
def _same_pad_for_filter_shape_transpose(
    x: np.ndarray,
    axes: Sequence[int],
    out_shape: Sequence[int]
) -> np.ndarray:
  """Transpose of the `_same_pad_for_filter_shape` function.

  Unpads (crops) the array and fills each coordinate with the sum of all
  elements at positions where the current element would appear during
  `CIRCULAR` padding.

  Args:
    x:
      `np.ndarray` to pad, e.g. a 4D `NHWC` image.

    axes:
      non-negative integers, the spatial axes to apply convolution
      over (e.g. `(1, 2)` for an `NHWC` image).

    out_shape:
      target shape after cropping.

  Returns:
    A `np.ndarray` of shape `output_shape`.
  """
  # Sum aliased positions: along each axis in `axes`, a window of the smallest
  # odd size >= `x.shape[i] / out_shape[i]`, dilated by `out_shape[i]`, covers
  # the positions that fold onto the same output coordinate under circular
  # padding. Non-spatial axes get a trivial size-1 window.
  window_dimensions = tuple(
      int(onp.ceil(x.shape[i] / out_shape[i])) // 2 * 2 + 1
      if i in axes else 1 for i in range(x.ndim))

  dilation = tuple(out_shape[i] if i in axes else 1 for i in range(x.ndim))

  x = lax.reduce_window(
      operand=x,
      init_value=onp.zeros((), x.dtype),
      computation=lax.add,
      window_dimensions=window_dimensions,
      window_strides=(1,) * x.ndim,
      padding=Padding.SAME.name,
      window_dilation=dilation
  )

  if x.shape != out_shape:
    # Center-crop the spatial axes down to `out_shape`; other axes are kept.
    pads = [((x.shape[i] - out_shape[i]) // 2,
             (x.shape[i] - out_shape[i]) - (x.shape[i] - out_shape[i]) // 2)
            for i in range(x.ndim)]

    slices = []
    for axis in range(x.ndim):
      if axis in axes:
        slices += [slice(pads[axis][0], x.shape[axis] - pads[axis][1])]
      else:
        slices += [slice(None)]
    x = x[tuple(slices)]
  return x
def _pool_transpose(
    x: np.ndarray,
    filter_shape: Sequence[int],
    strides: Sequence[int],
    axes: Sequence[int],
    padding: Padding
) -> np.ndarray:
  """Apply a transpose convolution with an all-ones filter over `axes`."""
  n_spatial = len(axes)
  trailing = range(-n_spatial, 0)

  # Bring spatial axes to the trailing positions, then collapse everything
  # else into a single batch axis plus a dummy size-1 channel axis.
  y = np.moveaxis(x, axes, trailing)
  split = -n_spatial or y.ndim  # equals `y.ndim` when there are no spatial axes
  lead_shape = y.shape[:split]
  y = y.reshape((-1, 1) + y.shape[split:])

  ones_filter = np.ones(tuple(filter_shape) + (1, 1), y.dtype)
  y = lax.conv_transpose(y,
                         ones_filter,
                         strides,
                         padding.name,
                         dimension_numbers=_get_dimension_numbers(n_spatial))

  # Restore the original axis layout.
  y = y.reshape(lead_shape + y.shape[2:])
  return np.moveaxis(y, trailing, axes)
def _get_dimension_numbers(
n: int,
channels_first: bool = True
) -> Tuple[str, str, str]:
spatial_dims = ''.join(c for c in string.ascii_uppercase
if c not in ('N', 'C', 'I', 'O'))[:n]
if channels_first:
lhs_spec = 'NC' + spatial_dims
else:
lhs_spec = 'N' + spatial_dims + 'C'
dimension_numbers = (lhs_spec, spatial_dims + 'IO', lhs_spec)
return dimension_numbers
def _conv_kernel_full_spatial_shared(
    lhs: Optional[np.ndarray],
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_ndim: int
) -> Optional[np.ndarray]:
  """Compute covariance of the CNN outputs given inputs with covariance `lhs`.

  Used when `kernel.diagonal_spatial == False`.

  Args:
    lhs:
      a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
      sample-[sample-]position-position covariances of CNN inputs, where `S` is
      the number of spatial dimensions (e.g. 2 for images). Has shape
      `(batch_size_1, [batch_size_2,] height, height, width, width, depth,
      depth, ...)`.

    filter_shape:
      positive integers, the convolutional filters spatial shape
      (e.g. `(3, 3)` for a 2D convolution).

    strides:
      positive integers, the CNN strides (e.g. `(1, 1)` for a 2D
      convolution).

    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.

    batch_ndim:
      number of batch dimensions, 1 or 2.

  Returns:
    a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
    sample-[sample-]position-position covariances of CNN outputs, where `S` is
    the number of spatial dimensions (e.g. 2 for images). Has shape
    `(batch_size_1, [batch_size_2,] new_width, new_width, new_height,
    new_height, new_depth, new_depth, ...)`.
  """
  if lhs is None or lhs.ndim == 0:
    return lhs

  if padding == Padding.CIRCULAR:
    # Emulate circular padding by explicitly pre-padding; the convolutions in
    # `_conv_kernel_full_spatial_loop` then run with `VALID` padding. Each
    # spatial dimension appears twice in `lhs`, hence the doubled specs.
    spatial_axes = tuple(range(batch_ndim, lhs.ndim))
    total_filter_shape = utils.double_tuple(filter_shape)
    total_strides = utils.double_tuple(strides)
    lhs = _same_pad_for_filter_shape(lhs,
                                     total_filter_shape,
                                     total_strides,
                                     spatial_axes)

  def lax_conv(lhs, rhs, strides, padding):
    # `feature_group_count` equal to the channel count makes this a depthwise
    # convolution, applying the same filter to every channel independently.
    return lax.conv_general_dilated(
        lhs, rhs, strides, padding,
        dimension_numbers=_CONV_KERNEL_DIMENSION_NUMBERS,
        feature_group_count=lhs.shape[
            _CONV_KERNEL_DIMENSION_NUMBERS[0].index('C')])

  def get_n_channels(batch_and_channels: int) -> int:
    """Get the hardware-friendly channel size for depthwise convolution.

    Args:
      batch_and_channels: total size of non-spatial dimensions.

    Returns:
      Suggested number of channels for depthwise-separable convolution.
    """
    platform = jax.default_backend()
    if platform in ['gpu', 'tpu']:
      n_channels = batch_and_channels

      # Find smallest `n_channels > 1` that divides `batch_and_features`; use
      # depthwise-separable CNN. For `n_channels == 1` CuDNN appears to invoke a
      # different algorithm (`void cudnn::detail::implicit_convolve_sgemm`) than
      # in any other case (`conv2d_c1_k1_nchw_hw_packed_kernel`), and the latter
      # seems many-fold faster.
      # For TPU, start with `n_channels >= 128`. Beware of precision errors:
      # TODO(romann): revisit based on b/154160868.
      n_channels_min = 2 if platform == 'gpu' else 128

      for n_c in range(n_channels_min, batch_and_channels):
        if batch_and_channels % n_c == 0:
          n_channels = n_c
          break

    elif platform == 'cpu':
      # For CPU minimal channels seems best. Transpose convolution does not
      # support depthwise operations.
      n_channels = 1

    else:
      raise NotImplementedError(platform)
    return n_channels

  out = _conv_kernel_full_spatial_loop(lhs, filter_shape, strides, padding,
                                       lax_conv, get_n_channels)
  return out
def _conv_kernel_full_spatial_unshared(
    lhs: Optional[np.ndarray],
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_ndim: int,
) -> Optional[np.ndarray]:
  """Compute covariance of unshared CNN given inputs with covariance `lhs`.

  Used when `kernel.diagonal_spatial == False`. Has the same outputs on the
  spatial diagonal as `_conv_kernel_full_spatial_shared`, but `0` in all
  off-spatial-diagonal entries. The diagonal entries are computed via calling
  `_conv_kernel_diagonal_spatial`.

  Args:
    lhs:
      a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
      sample-[sample-]position-position covariances of CNN inputs, where `S` is
      the number of spatial dimensions (e.g. 2 for images). Has shape
      `(batch_size_1, [batch_size_2,] height, height, width, width, depth,
      depth, ...)`.

    filter_shape:
      positive integers, the convolutional filters spatial shape
      (e.g. `(3, 3)` for a 2D convolution).

    strides:
      positive integers, the CNN strides (e.g. `(1, 1)` for a 2D
      convolution).

    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.

    batch_ndim:
      number of batch dimensions, 1 or 2.

  Returns:
    a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
    sample-[sample-]position-position covariances of CNN outputs, where `S` is
    the number of spatial dimensions (e.g. 2 for images). Has shape
    `(batch_size_1, [batch_size_2,] new_width, new_width, new_height,
    new_height, new_depth, new_depth, ...)`.
  """
  if lhs is None or lhs.ndim == 0:
    return lhs

  # Extract the spatial diagonal of `lhs` and push it through the
  # diagonal-spatial computation.
  lhs = utils.unzip_axes(lhs, batch_ndim)
  lhs_diag = utils.diagonal_between(lhs, batch_ndim)
  out_diag = _conv_kernel_diagonal_spatial(lhs_diag, filter_shape, strides,
                                           padding, batch_ndim)

  # Scatter the diagonal outputs back into full (zipped) spatial matrices:
  # flatten batch axes, embed each row as a diagonal via `np.diag`, unflatten.
  out_diag_flat = out_diag.reshape((onp.prod(out_diag.shape[:batch_ndim]), -1))
  out_flat = vmap(np.diag)(out_diag_flat)

  out = out_flat.reshape(out_diag.shape[:batch_ndim] +
                         out_diag.shape[batch_ndim:] * 2)
  out = utils.zip_axes(out, batch_ndim)
  return out
def _conv_kernel_full_spatial_transpose(
    lhs: Optional[np.ndarray],
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_ndim: int
) -> Optional[np.ndarray]:
  """Compute covariance of the CNN transpose given inputs with covariance `lhs`.

  Used when `kernel.diagonal_spatial == False`.

  Args:
    lhs:
      a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
      sample-[sample-]position-position covariances of CNN inputs, where `S` is
      the number of spatial dimensions (e.g. 2 for images). Has shape
      `(batch_size_1, [batch_size_2,] height, height, width, width, depth,
      depth, ...)`.

    filter_shape:
      positive integers, the convolutional filters spatial shape
      (e.g. `(3, 3)` for a 2D convolution).

    strides:
      positive integers, the CNN strides (e.g. `(1, 1)` for a 2D
      convolution).

    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.

    batch_ndim:
      number of batch dimensions, 1 or 2.

  Returns:
    a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
    sample-[sample-]position-position covariances of CNN outputs, where `S` is
    the number of spatial dimensions (e.g. 2 for images). Has shape
    `(batch_size_1, [batch_size_2,] new_width, new_width, new_height,
    new_height, new_depth, new_depth, ...)`.
  """
  if lhs is None or lhs.ndim == 0:
    return lhs

  def lax_conv(lhs, rhs, strides, padding):
    # Transpose convolution; depthwise grouping is unsupported here, hence
    # `get_n_channels` below always returns 1.
    return lax.conv_transpose(
        lhs, rhs, strides, padding,
        dimension_numbers=_CONV_KERNEL_DIMENSION_NUMBERS)

  def get_n_channels(batch_and_channels: int) -> int:
    """Transpose convolution does not support depthwise separable filters."""
    return 1

  out = _conv_kernel_full_spatial_loop(lhs, filter_shape, strides, padding,
                                       lax_conv, get_n_channels)

  if padding == Padding.CIRCULAR:
    spatial_axes = tuple(range(batch_ndim, out.ndim))
    total_filter_shape = utils.double_tuple(filter_shape)
    total_strides = utils.double_tuple(strides)
    # Abstractly evaluate the `SAME`-padded transpose-pooling output shape;
    # used as the target shape for undoing the circular padding.
    out_shape = eval_shape(lambda x: _pool_transpose(x,
                                                     total_filter_shape,
                                                     total_strides,
                                                     spatial_axes,
                                                     Padding.SAME), lhs).shape
    out = _same_pad_for_filter_shape_transpose(
        x=out,
        axes=spatial_axes,
        out_shape=utils.reverse_zipped(out_shape, batch_ndim))
  return out
def _conv_kernel_full_spatial_loop(
    lhs: np.ndarray,
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    lax_conv: Callable[
        [np.ndarray, np.ndarray, Tuple[int, ...], str], np.ndarray],
    get_n_channels: Callable[[int], int]
) -> np.ndarray:
  """Apply `lax_conv` to every zipped pair of spatial axes of `lhs` in turn.

  `lhs` holds each spatial dimension twice (`..., X, X, Y, Y, ...`); every
  iteration treats one such pair as a 2D image and convolves it with a
  diagonal filter of `1 / filter_size` entries.

  Args:
    lhs: covariance array with leading batch axes followed by zipped pairs of
      spatial axes.
    filter_shape: per-spatial-dimension filter sizes.
    strides: per-spatial-dimension strides.
    padding: a `Padding` enum; `CIRCULAR` is expected to have been handled by
      pre-padding in the caller, so it is mapped to `VALID` here.
    lax_conv: the convolution primitive to apply (forward or transpose).
    get_n_channels: maps the collapsed non-spatial size to a channel count
      (backend-dependent performance heuristic).

  Returns:
    `lhs` with every spatial axis pair convolved.
  """
  padding = Padding.VALID if padding == Padding.CIRCULAR else padding

  def get_rhs(n_channels: int, filter_size: int) -> np.ndarray:
    # Diagonal `filter_size x filter_size` filter with `1 / filter_size`
    # entries, broadcast to `OIHW` with `I == 1` (depthwise) and
    # `O == n_channels`.
    rhs = np.diag(np.full((filter_size,), 1. / filter_size, lhs.dtype))
    rhs_shape = ()
    for c in _CONV_KERNEL_DIMENSION_NUMBERS[1]:
      if c == 'O':
        rhs_shape += (n_channels,)
      elif c == 'I':
        rhs_shape += (1,)
      else:
        rhs_shape += (filter_size,)
    rhs = np.broadcast_to(rhs, rhs_shape)
    return rhs

  batch_ndim = lhs.ndim - len(filter_shape) * 2
  for i in range(lhs.ndim - 1, batch_ndim, -2):
    spatial_i = (i - batch_ndim) // 2

    # Bring the current axis pair to the trailing (H, W) positions and fold
    # all remaining axes into (batch, channel) for the `NCHW` convolution.
    lhs = np.moveaxis(lhs, (i - 1, i), (-2, -1))
    preshape = lhs.shape[:-2]
    n_channels = get_n_channels(utils.size_at(preshape))
    lhs = lhs.reshape((-1, n_channels, lhs.shape[-2], lhs.shape[-1]))

    rhs = get_rhs(n_channels, filter_shape[spatial_i])
    lhs = lax_conv(lhs, rhs, (strides[spatial_i],) * 2, padding.name)
    lhs = lhs.reshape(preshape + lhs.shape[-2:])

  return lhs
def _conv_kernel_diagonal_spatial(
    lhs: Optional[np.ndarray],
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_ndim: int
) -> Optional[np.ndarray]:
  """Compute covariance of the CNN outputs given inputs with covariance `lhs`.

  Used when `kernel.diagonal_spatial == True`.

  Args:
    lhs:
      an `(S+batch_ndim)`-dimensional `np.ndarray` of sample-sample-(same
      position) covariances of CNN inputs, with `batch_ndim` leading batch
      axes followed by `S` spatial axes, i.e. of shape `(batch_size_1,
      [batch_size_2,] height, width, depth, ...)`. May also be `None` or a
      scalar, in which case it is returned unchanged.
    filter_shape:
      tuple of positive integers, the convolutional filters spatial shape
      (e.g. `(3, 3)` for a 2D convolution).
    strides:
      tuple of positive integers, the CNN strides (e.g. `(1, 1)` for a 2D
      convolution).
    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.
    batch_ndim:
      number of leading batch dimensions, 1 or 2.

  Returns:
    an `(S+batch_ndim)`-dimensional `np.ndarray` of sample-sample-(same
    position) covariances of CNN outputs, of shape `(batch_size_1,
    [batch_size_2,] new_height, new_width, new_depth, ...)`.
  """
  if lhs is None or lhs.ndim == 0:
    return lhs

  spatial_axes = tuple(range(batch_ndim, lhs.ndim))
  if padding == Padding.CIRCULAR:
    # Pre-pad circularly, then reduce with a `VALID` window below.
    lhs = _same_pad_for_filter_shape(lhs, filter_shape, strides, spatial_axes)
    window_padding = Padding.VALID
  else:
    window_padding = padding

  batch_ones = (1,) * batch_ndim
  summed = lax.reduce_window(
      operand=lhs,
      init_value=onp.zeros((), lhs.dtype),
      computation=lax.add,
      window_dimensions=batch_ones + tuple(filter_shape),
      window_strides=batch_ones + tuple(strides),
      padding=window_padding.name)

  # Average (rather than sum) over the filter support.
  n_filter_elements = functools.reduce(op.mul, filter_shape, 1)
  return summed / n_filter_elements
def _conv_kernel_diagonal_spatial_transpose(
    lhs: Optional[np.ndarray],
    filter_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_ndim: int
) -> Optional[np.ndarray]:
  """Compute covariance of the CNN transpose given inputs with covariance `lhs`.

  Used when `kernel.diagonal_spatial == True`.

  Args:
    lhs:
      an `(S+batch_ndim)`-dimensional `np.ndarray` containing
      sample-sample-(same position) covariances of CNN inputs. Has `batch_ndim`
      batch and `S` spatial dimensions with the shape of `(batch_size_1,
      [batch_size_2,] height, width, depth, ...)`.

    filter_shape:
      tuple of positive integers, the convolutional filters spatial shape
      (e.g. `(3, 3)` for a 2D convolution).

    strides:
      tuple of positive integers, the CNN strides (e.g. `(1, 1)` for a 2D
      convolution).

    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.

    batch_ndim:
      number of leading batch dimensions, 1 or 2.

  Returns:
    an `(S+batch_ndim)`-dimensional `np.ndarray` containing
    sample-sample-(same position) covariances of CNN outputs. Has `batch_ndim`
    batch and `S` spatial dimensions with the shape of `(batch_size_1,
    [batch_size_2,] new_height, new_width, new_depth, ...)`.
  """
  if lhs is None or lhs.ndim == 0:
    return lhs

  spatial_axes = tuple(range(batch_ndim, lhs.ndim))
  apply_padding = Padding.VALID if padding == Padding.CIRCULAR else padding

  out = _pool_transpose(lhs, filter_shape, strides, spatial_axes, apply_padding)

  if padding == Padding.CIRCULAR:
    # Target shape of the `SAME`-padded transpose pooling, used to undo the
    # circular padding.
    # Bugfix: use `Padding.SAME` (class attribute) instead of the previous
    # `padding.SAME` — accessing one enum member through another was
    # deprecated in Python 3.11 and removed in 3.12, and the class-level
    # access matches `_conv_kernel_full_spatial_transpose`.
    out_shape = eval_shape(lambda x: _pool_transpose(
        x,
        filter_shape,
        strides,
        spatial_axes,
        Padding.SAME), lhs).shape
    out = _same_pad_for_filter_shape_transpose(out, spatial_axes, out_shape)

  # Normalize by the filter size (transpose of the averaging filter).
  filter_size = functools.reduce(op.mul, filter_shape, 1)
  return out / filter_size
def _pool_kernel(
    lhs: np.ndarray,
    pool_type: _Pooling,
    window_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    normalize_edges: bool,
    batch_ndim: int
) -> np.ndarray:
  """Get covariances of pooling outputs given inputs covariances `lhs`.

  Args:
    lhs:
      a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
      sample-[sample-]position-position covariances of pooling inputs, where `S`
      is the number of spatial dimensions (e.g. 2 for images). Has shape
      `(batch_size_1, [batch_size_2,]
      height, height, width, width, depth, depth, ...)`.

    pool_type:
      a `Pooling` enum, e.g. `Pooling.AVG`.

    window_shape:
      tuple of positive integers, the pooling spatial shape (e.g. `(3, 3)`).

    strides:
      tuple of positive integers, the pooling strides, e.g. `(1, 1)`.

    padding:
      a `Padding` enum, e.g. `Padding.CIRCULAR`.

    normalize_edges:
      `True` to normalize output by the effective receptive field, `False` to
      normalize by the window size. Only has effect at the edges when `SAME`
      padding is used. Set to `True` to retain correspondence to
      `ostax.AvgPool`.

    batch_ndim:
      number of leading batch dimensions, 1 or 2.

  Returns:
    a `(2*S+batch_ndim)`-dimensional `np.ndarray` containing
    sample-[sample-]position-position covariances of pooling outputs, where
    `S` is the number of spatial dimensions (e.g. 2 for images). Has shape
    `(batch_size_1, [batch_size_2,]
    height, height, width, width, depth, depth, ...)`.
  """
  if padding == Padding.CIRCULAR:
    # Pre-pad circularly, then pool with `VALID` padding below.
    spatial_axes = tuple(range(batch_ndim, lhs.ndim))
    lhs = _same_pad_for_filter_shape(lhs, window_shape, strides, spatial_axes)
    padding = Padding.VALID

  # Prepend trivial window/stride entries for the batch axes.
  window_shape = (1,) * batch_ndim + tuple(window_shape)
  strides = (1,) * batch_ndim + tuple(strides)

  out = lax.reduce_window(lhs, 0., lax.add, window_shape, strides, padding.name)

  if pool_type == _Pooling.AVG:
    # Sum pooling only differs from average pooling by this normalization.
    out = _normalize(lhs, out, normalize_edges, padding, strides, window_shape)

  return out
def _normalize(lhs, out, normalize_edges, padding, strides, window_shape):
  """Divide summed pooling outputs `out` by the (effective) window size."""
  if normalize_edges and padding == Padding.SAME:
    # Match `jax.example_libraries.stax.AvgPool` under `SAME` padding: divide
    # by the number of elements actually present in each window, which is
    # smaller near the edges.
    ones = np.ones_like(lhs, lhs.dtype)
    effective_sizes = lax.reduce_window(ones, 0., lax.add, window_shape,
                                        strides, padding.name)
    out /= effective_sizes
  else:
    out /= onp.prod(window_shape)
  return out
def _diag_mul_full_spatial(
x: np.ndarray,
factor: float,
diagonal_batch: bool) -> np.ndarray:
if diagonal_batch:
idx = (slice(None),)
batch_ndim = 1
else:
if x.shape[0] != x.shape[1]:
return x
idx = ()
batch_ndim = 2
ndims = x.ndim // 2
for i in range(ndims):
shape = [1] * ndims
size = x.shape[2 - batch_ndim + 2 * i]
shape[i] = size
idx += (np.arange(size).reshape(shape),) * 2
x = x.at[idx].mul(factor)
return x
def _diag_mul_diagonal_spatial(
x: np.ndarray,
factor: float,
diagonal_batch: bool) -> np.ndarray:
if diagonal_batch:
x *= factor
else:
if x.shape[0] != x.shape[1]:
return x
idx = np.diag_indices(x.shape[0]) + (Ellipsis,)
x = x.at[idx].mul(factor)
return x
def _diag_mul(
    x: Optional[np.ndarray],
    factor: float,
    diagonal_batch: bool,
    diagonal_spatial: bool) -> Optional[np.ndarray]:
  """Dispatch diagonal scaling based on the kernel layout; `None` passes through."""
  if x is None:
    return x

  scale = (_diag_mul_diagonal_spatial if diagonal_spatial
           else _diag_mul_full_spatial)
  return scale(x, factor, diagonal_batch)
def _vmap_2d(fn: Callable[[float, float, float], float],
             cov12: np.ndarray,
             var1: np.ndarray,
             var2: Optional[np.ndarray],
             diagonal_batch: bool,
             diagonal_spatial: bool) -> np.ndarray:
  """Effectively a "2D vmap" of `fn(cov12, var1, var2)`.

  Applicable for all possible kernel layouts.

  Args:
    fn:
      scalar-valued, elementwise `fn(cov12, var1, var2)` function to apply.

    cov12:
      covariance tensor (`q12`), `nngp`/`ntk`/`cov1`/`cov2`, of shape
      `(N1[, N2])`, `(N1[, N2], X, Y, ...)`, `(N1[, N2], X, X, Y, Y, ...)`
      depending on `diagonal_batch`, `diagonal_spatial`, and the number of
      spatial dimensions.

    var1:
      variance tensor (`q11`), has shape `(N1[, X, Y, ...])`.

    var2:
      variance tensor (`q22`), has shape `(N1[, X, Y, ...])`.

    diagonal_batch:
      `True` if `cov12` has only one batch dimension.

    diagonal_spatial:
      `True` if `cov12` has spatial dimensions appearing once (vs twice).

  Returns:
    Resulting array `[fn(cov12[i, j], var1[i], var2[j])]_{i j}`. Has the same
    shape as `cov12`.
  """
  batch_ndim = 1 if diagonal_batch else 2
  start = 2 - batch_ndim
  cov_end = batch_ndim if diagonal_spatial else cov12.ndim
  # Collapse the zipped axes in `[start, cov_end)` into two flattened axes
  # (presumably one per half of each zipped pair — see `utils.make_2d`).
  _cov12 = utils.make_2d(cov12, start, cov_end)

  var_end = 1 if diagonal_spatial else var1.ndim
  # Flatten the corresponding variance axes into a single axis at `start`.
  var1 = var1.reshape(var1.shape[:start] + (-1,) + var1.shape[var_end:])
  var2 = var1 if var2 is None else var2.reshape(var2.shape[:start] + (-1,) +
                                                var2.shape[var_end:])

  # Outer `vmap` pairs the first flattened axis of `_cov12` with `var1`;
  # the inner one pairs the second flattened axis with `var2`.
  fn = vmap(
      vmap(
          np.vectorize(fn),
          in_axes=(start, None, start),
          out_axes=start
      ),
      in_axes=(start, start, None),
      out_axes=start
  )
  out = fn(_cov12, var1, var2)  # type: np.ndarray
  # Undo the flattening: restore unzipped axis sizes (all first-of-pair axes,
  # then all second-of-pair axes) and re-zip them.
  out_shape = (cov12.shape[:start] +
               cov12.shape[start:cov_end:2] +
               cov12.shape[start + 1:cov_end:2] +
               cov12.shape[cov_end:])
  out = out.reshape(out_shape)
  out = utils.zip_axes(out, start, cov_end)
  return out
# MASKING


# Large finite negative value used in place of `-np.inf` when masking:
_NEG_INF = -1e20  # softmax raises an error if all entries are -np.inf
def _check_is_implemented(mask: np.ndarray, channel_axis: int) -> None:
if mask.shape[channel_axis] != 1:
raise NotImplementedError(
'Different channel-wise masks as inputs to '
'pooling layers are not yet supported. Please '
'let us know about your use case at '
'https://github.com/google/neural-tangents/issues/new')
def _pool_mask(
    mask: np.ndarray,
    window_shape: Sequence[int],
    strides: Sequence[int],
    padding: Padding,
    batch_axis: int,
    channel_axis: int) -> np.ndarray:
  """Compute the mask of a pooling output from the input `mask`.

  When pooling changes the spatial shape, the mask is subsampled at the
  centers of the pooling windows; otherwise it is returned unchanged. The
  `reduce_window` call below is only evaluated abstractly (via `eval_shape`)
  to derive the output shape.

  Args:
    mask: boolean input mask.
    window_shape: pooling window spatial shape.
    strides: pooling spatial strides.
    padding: a `Padding` enum.
    batch_axis: batch axis of `mask`.
    channel_axis: channel axis of `mask`.

  Returns:
    Boolean mask of the pooling output shape.

  Raises:
    ValueError: if striding the mask does not produce the output shape.
  """
  window_shape = list(window_shape)
  strides = list(strides)

  # Insert trivial window/stride entries at the batch and channel axes.
  non_spatial_axes = utils.canonicalize_axis((batch_axis, channel_axis), mask)
  for i in non_spatial_axes:
    window_shape.insert(i, 1)
    strides.insert(i, 1)

  # Get the output shape.
  out_shape = eval_shape(lambda x: lax.reduce_window(
      operand=x,
      init_value=np.zeros((), x.dtype),
      computation=op.or_,
      window_dimensions=window_shape,
      window_strides=strides,
      padding=padding.name
  ), mask).shape

  # If shapes don't match, stride through the mask.
  if mask.shape != out_shape:
    pads = lax.padtype_to_pads(mask.shape, window_shape, strides, padding.name)

    slices = ()
    for i in range(mask.ndim):
      # Start at the center of the first window (accounting for left padding)
      # and step by the stride.
      start = - pads[i][0] + (window_shape[i] - 1) // 2
      end = start + 1 + (out_shape[i] - 1) * strides[i]
      slices += (slice(start, end, strides[i]),)

    mask = mask[slices]
    if mask.shape != out_shape:
      raise ValueError(f'Mask shape must be {out_shape}, but got {mask.shape}, '
                       f'please submit a bug to '
                       f'https://github.com/google/neural-tangents/issues/new.')
  return mask
def _pooling_layer(reducer, init_val, rescaler=None):
  """Adapted from :obj:`jax.example_libraries.stax`.

  Args:
    reducer: binary reduction primitive applied over the window (e.g.
      `lax.add`).
    init_val: identity element for `reducer`.
    rescaler: optional factory `(window_shape, strides, padding) -> rescale`
      producing a function applied to the reduced outputs (used for average
      pooling).

  Returns:
    A `PoolingLayer(window_shape, strides, padding, spec)` constructor
    returning a `(init_fun, apply_fun)` pair.
  """
  def PoolingLayer(window_shape, strides=None, padding='VALID', spec=None):
    """Pooling."""
    window_shape = tuple(window_shape)
    strides = strides or (1,) * len(window_shape)
    rescale = rescaler(window_shape, strides, padding) if rescaler else None

    # Locate the batch (`N`) and channel (`C`) axes; default layout is
    # batch-first, channel-last.
    if spec is None:
      non_spatial_axes = 0, len(window_shape) + 1
    else:
      non_spatial_axes = spec.index('N'), spec.index('C')

    # Insert trivial window/stride entries at the non-spatial axes.
    for i in sorted(non_spatial_axes):
      window_shape = window_shape[:i] + (1,) + window_shape[i:]
      strides = strides[:i] + (1,) + strides[i:]

    def init_fun(rng, input_shape):
      # No parameters; only the output shape is computed.
      padding_vals = lax.padtype_to_pads(input_shape, window_shape,
                                         strides, padding)
      ones = (1,) * len(window_shape)
      out_shape = lax.reduce_window_shape_tuple(
          input_shape, window_shape, strides, padding_vals, ones, ones)
      return out_shape, ()

    def apply_fun(params, inputs, **kwargs):
      out = lax.reduce_window(inputs, init_val, reducer, window_shape,
                              strides, padding)
      return rescale(out, inputs, spec) if rescale else out
    return init_fun, apply_fun
  return PoolingLayer
# POSITIONAL EMBEDDINGS
def _pos_emb_identity(shape: Sequence[int]) -> np.ndarray:
  """One-hot (identity) positional embeddings over positions of `shape`."""
  size = utils.size_at(shape)  # pytype: disable=wrong-arg-types  # jax-ndarray
  eye = np.eye(size).reshape(tuple(shape) * 2)
  return utils.zip_axes(eye)
def _pos_emb_pdist(shape: Sequence[int],
                   pos_emb_p_norm: Optional[float],
                   pos_emb_decay_fn: Optional[Callable[[float], float]]
                   ) -> np.ndarray:
  """Positional embeddings from pairwise position distances.

  Args:
    shape: spatial positions shape.
    pos_emb_p_norm: power applied to per-axis coordinate differences; the
      accumulated sum is the `p`-th power of the `p`-norm distance.
    pos_emb_decay_fn: decay function applied to the summed distances; `None`
      yields identity / one-hot embeddings instead.

  Returns:
    Array of zipped pairwise embeddings over positions of `shape`.
  """
  if pos_emb_decay_fn is None:
    # Identity / one-hot positional embeddings.
    return _pos_emb_identity(shape)

  # Pairwise distance-based positional embeddings.
  ndim = len(shape)
  R = np.zeros((1,) * (ndim * 2))
  for axis in range(ndim):
    # Accumulate `|d_i - d_j| ** p` for this axis, broadcast against the
    # (singleton) axes of all other dimensions.
    d = np.arange(shape[axis])
    pd = utils.outer_prod(d, d, 0, d.ndim, op.sub)
    pd = pd.reshape((1,) * (2 * axis) +
                    pd.shape +
                    (1,) * (2 * (ndim - axis - 1)))
    R += np.abs(pd) ** pos_emb_p_norm

  R = pos_emb_decay_fn(R)
  return R  # pytype: disable=bad-return-type  # jax-ndarray
def _get_all_pos_emb(k: Kernel,
                     pos_emb_type: PositionalEmbedding,
                     pos_emb_p_norm: float,
                     pos_emb_decay_fn: Optional[Callable[[float], float]]
                     ) -> Tuple[Optional[np.ndarray],
                                Optional[np.ndarray],
                                Optional[np.ndarray]]:
  """Build positional-embedding matrices `(R11, R12, R22)` for kernel `k`.

  Returns `(None, None, None)` when positional embeddings are disabled.
  The matrices are masked consistently with `k`'s input masks; `R22` is
  `None` whenever `k.cov2` is absent.
  """
  if pos_emb_type == PositionalEmbedding.NONE:
    return None, None, None

  # Embeddings over the spatial positions (batch/channel axes excluded).
  shape, _ = _shape_and_axes(k.shape1, (k.batch_axis, k.channel_axis))
  R = _pos_emb_pdist(shape, pos_emb_p_norm, pos_emb_decay_fn)

  if k.is_reversed:
    R = utils.reverse_zipped(R)

  # Add singleton batch axes so `R` broadcasts against the covariances.
  batch_ndim = 1 if k.diagonal_batch else 2
  R11 = np.expand_dims(R, tuple(range(batch_ndim)))
  R12 = R11 if batch_ndim == 2 else np.expand_dims(R, (0, 1))
  R22 = None if k.cov2 is None else R11

  # Apply the kernel's pairwise mask products.
  mask11, mask12, mask22 = k._get_mask_prods(k.mask1, k.mask2)
  R11 = utils.mask(R11, mask11)
  R12 = utils.mask(R12, mask12)
  R22 = utils.mask(R22, mask22)

  return R11, R12, R22
def _shape_and_axes(
x: Tuple[int, ...],
ignore_axes: Iterable[int] = ()
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
ndim = len(x)
ignore_axes = tuple(i % ndim for i in ignore_axes)
axes = tuple(i for i in range(ndim) if i not in ignore_axes)
shape = tuple(x[i] for i in axes)
return shape, axes
| 133,918 | 34.223304 | 115 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/stax/branching.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Branching functions.
These layers split an input into multiple branches or fuse multiple inputs from
several branches into one.
"""
import functools
from typing import Callable, Iterable, List, Optional, Sequence, Tuple
import warnings
from jax import numpy as np
import jax.example_libraries.stax as ostax
from .requirements import layer, supports_masking
from ..utils.kernel import Kernel
from ..utils.typing import InternalLayer, InternalLayerMasked, Kernels
@layer
def FanOut(num: int) -> InternalLayer:
  """Fan-out.

  Produces `num` identical copies of the input, to be fed into different
  branches of a neural network (for example with residual connections).

  Args:
    num: The number of going edges to fan out into.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanOut(num)

  def kernel_fn(k, **kwargs):
    # Every branch receives an identical copy of the incoming kernel.
    return [k] * num

  return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=False)
def FanInSum() -> InternalLayerMasked:
  """Fan-in sum.

  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and sums the inputs to produce a single
  output. Based on :obj:`jax.example_libraries.stax.FanInSum`.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanInSum

  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    if not all([k.shape1 == ks[0].shape1 and
                k.shape2 == ks[0].shape2 for k in ks[1:]]):
      raise ValueError('All shapes should be equal in `FanInSum/FanInProd`, '
                       f'got `x1.shape`s of {[k.shape1 for k in ks]}, '
                       f'`x2.shape`s of {[k.shape2 for k in ks]}.')

    is_gaussian = all(k.is_gaussian for k in ks)
    if not is_gaussian and len(ks) != 1:
      # TODO(xlc): FanInSum/FanInConcat could allow non-Gaussian inputs, but
      # we need to propagate the mean of the random variables as well.
      raise NotImplementedError('`FanInSum` is only implemented for the '
                                'case where all input layers guaranteed to be '
                                'mean-zero Gaussian, i.e. having all '
                                '`is_gaussian` set to `True`, got '
                                f'{[k.is_gaussian for k in ks]}.')

    # Covariances of a sum of (mean-zero Gaussian) branches add up entrywise;
    # a `None` entry (e.g. missing `ntk` or `cov2`) stays `None`.
    _mats_sum = lambda mats: None if mats[0] is None else sum(mats)

    cov1s = [k.cov1 for k in ks]
    cov2s = [k.cov2 for k in ks]
    nngps = [k.nngp for k in ks]
    ntks = [k.ntk for k in ks]
    cov1, cov2, nngp, ntk = map(_mats_sum, (cov1s, cov2s, nngps, ntks))

    # Metadata (shapes, axes, flags) is inherited from the first input kernel.
    return Kernel(cov1=cov1,
                  cov2=cov2,
                  nngp=nngp,
                  ntk=ntk,
                  x1_is_x2=ks[0].x1_is_x2,
                  is_gaussian=is_gaussian,
                  is_reversed=is_reversed,
                  is_input=ks[0].is_input,
                  diagonal_batch=ks[0].diagonal_batch,
                  diagonal_spatial=ks[0].diagonal_spatial,
                  shape1=ks[0].shape1,
                  shape2=ks[0].shape2,
                  batch_axis=ks[0].batch_axis,
                  channel_axis=ks[0].channel_axis,
                  mask1=None,
                  mask2=None)  # pytype:disable=wrong-keyword-args

  def mask_fn(mask, input_shape):
    # `mask` is a list of per-branch masks here; combine them.
    return _sum_masks(mask)

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def FanInProd() -> InternalLayerMasked:
  """Fan-in product.

  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and elementwise-multiplies the inputs to
  produce a single output.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, _ = ostax.FanInSum

  def apply_fn(params, inputs, **kwargs):
    # Elementwise product of all branch outputs.
    return functools.reduce(np.multiply, inputs)

  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    if not all([k.shape1 == ks[0].shape1 and
                k.shape2 == ks[0].shape2 for k in ks[1:]]):
      raise ValueError('All shapes should be equal in `FanInProd`.')

    # A product of multiple branches is in general non-Gaussian.
    is_gaussian = len(ks) == 1 and ks[0].is_gaussian

    def _mats_prod(nngps, ntks):
      if None in ntks:
        # No NTK requested: only the NNGP product is needed.
        return functools.reduce(np.multiply, nngps), None

      nngp_prod, ntk_prod = 1., 0.
      for nngp, ntk in zip(nngps, ntks):
        # Product rule: the NTK of a product accumulates each factor's NTK
        # times the NNGPs of the others.
        ntk_prod = ntk_prod * nngp + nngp_prod * ntk
        nngp_prod *= nngp
      return nngp_prod, ntk_prod

    cov1s = [k.cov1 for k in ks]
    cov2s = [k.cov2 for k in ks]
    nngps = [k.nngp for k in ks]
    ntks = [k.ntk for k in ks]

    cov1 = functools.reduce(np.multiply, cov1s)
    cov2 = None if None in cov2s else functools.reduce(np.multiply, cov2s)
    nngp, ntk = _mats_prod(nngps, ntks)

    # Output shapes are unknown here (`shape1`/`shape2` set to `None`).
    return Kernel(cov1=cov1,
                  cov2=cov2,
                  nngp=nngp,
                  ntk=ntk,
                  x1_is_x2=ks[0].x1_is_x2,
                  is_gaussian=is_gaussian,
                  is_reversed=is_reversed,
                  is_input=ks[0].is_input,
                  diagonal_batch=ks[0].diagonal_batch,
                  diagonal_spatial=ks[0].diagonal_spatial,
                  shape1=None,
                  shape2=None,
                  batch_axis=ks[0].batch_axis,
                  channel_axis=ks[0].channel_axis,
                  mask1=None,
                  mask2=None)  # pytype:disable=wrong-keyword-args

  def mask_fn(mask, input_shape):
    # `mask` is a list of per-branch masks here; combine them.
    return _sum_masks(mask)

  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def FanInConcat(axis: int = -1) -> InternalLayerMasked:
  """Fan-in concatenation.
  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and concatenates the inputs to produce a
  single output. Based on :obj:`jax.example_libraries.stax.FanInConcat`.
  Args:
    axis: Specifies the axis along which input tensors should be concatenated.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanInConcat(axis)
  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    """Compute the kernel of inputs concatenated along `axis`."""
    # Bring all incoming kernels to a common spatial-axis ordering first.
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    diagonal_batch = ks[0].diagonal_batch
    diagonal_spatial = ks[0].diagonal_spatial
    shape1, shape2 = ks[0].shape1, ks[0].shape2
    ndim = len(shape1)
    _axis = axis % ndim
    batch_axis = ks[0].batch_axis
    channel_axis = ks[0].channel_axis
    # All non-concatenation axes must agree across inputs.
    new_shape1 = shape1[:_axis] + shape1[_axis + 1:]
    new_shape2 = shape2[:_axis] + shape2[_axis + 1:]
    for k in ks:
      k_shape1 = k.shape1[:_axis] + k.shape1[_axis + 1:]
      k_shape2 = k.shape2[:_axis] + k.shape2[_axis + 1:]
      if k_shape1 != new_shape1 or k_shape2 != new_shape2:
        raise ValueError('Non-`axis` shapes should be equal in `FanInConcat`.')
    # Check if inputs are independent Gaussians.
    if _axis != channel_axis:
      is_gaussian = all(k.is_gaussian for k in ks)
      if not is_gaussian:
        # TODO(xlc): FanInSum/FanInConcat could allow non-Gaussian inputs, but
        # we need to propagate the mean of the random variables as well.
        # NOTE: message rewrapped — the original string concatenation produced
        # "only implementedfor the case" (missing space between fragments).
        raise NotImplementedError(
            '`FanInConcat` layer along the non-channel axis is only '
            'implemented for the case if all input layers guaranteed to be '
            'mean-zero Gaussian, i.e. having all `is_gaussian` set to `True`.')
    else:
      # TODO(romann): allow nonlinearity after channelwise concatenation.
      # TODO(romann): support concatenating different channelwise masks.
      is_gaussian = False
    if _axis == batch_axis:
      # `f`-prefix only needed on the line that interpolates `_axis`.
      warnings.warn(f'Concatenation along the batch axis ({_axis}) gives '
                    'inconsistent covariances when batching - '
                    'proceed with caution.')
    spatial_axes = tuple(i for i in range(ndim)
                         if i not in (channel_axis, batch_axis))
    # Change spatial axis according to the kernel `is_reversed`.
    if _axis in spatial_axes and is_reversed:
      _axis = spatial_axes[::-1][spatial_axes.index(_axis)]
    # Map activation tensor axis to the covariance tensor axis.
    tensor_axis_to_kernel_axis = {
        **{
            batch_axis: 0,
            channel_axis: -1,
        },
        **{
            spatial_axis: idx + 1
            for idx, spatial_axis in enumerate(spatial_axes)
        }
    }
    _axis = tensor_axis_to_kernel_axis[_axis]
    widths = [k.shape1[channel_axis] for k in ks]
    # `nngp` / `ntk` always store the full (non-diagonal) batch covariance,
    # hence `diagonal_batch=False` for them below.
    cov1 = _concat_kernels([k.cov1 for k in ks], _axis,
                           diagonal_batch, diagonal_spatial, widths)
    cov2 = _concat_kernels([k.cov2 for k in ks], _axis,
                           diagonal_batch, diagonal_spatial, widths)
    nngp = _concat_kernels([k.nngp for k in ks], _axis,
                           False, diagonal_spatial, widths)
    ntk = _concat_kernels([k.ntk for k in ks], _axis,
                          False, diagonal_spatial, widths)
    return Kernel(cov1=cov1,
                  cov2=cov2,
                  nngp=nngp,
                  ntk=ntk,
                  x1_is_x2=ks[0].x1_is_x2,
                  is_gaussian=is_gaussian,
                  is_reversed=is_reversed,
                  is_input=ks[0].is_input,
                  diagonal_batch=diagonal_batch,
                  diagonal_spatial=diagonal_spatial,
                  shape1=None,
                  shape2=None,
                  batch_axis=batch_axis,
                  channel_axis=channel_axis,
                  mask1=None,
                  mask2=None)  # pytype:disable=wrong-keyword-args
  def mask_fn(mask, input_shape):
    # Concatenate the per-input masks along `axis`, broadcasting as needed.
    return _concat_masks(mask, input_shape, axis)
  return init_fn, apply_fn, kernel_fn, mask_fn
# INTERNAL UTILITIES
def _map_tuples(fn: Callable, tuples: Iterable[Tuple]) -> Tuple:
return tuple(map(fn, zip(*(t for t in tuples))))
def _sum_masks(masks: List[Optional[np.ndarray]]) -> Optional[np.ndarray]:
def add_two_masks(mask1, mask2):
if mask1 is None:
return mask2
if mask2 is None:
return mask1
return mask1 & mask2
mask = functools.reduce(add_two_masks, masks, None)
return mask
def _concat_masks(
masks: List[Optional[np.ndarray]],
input_shapes: Sequence[Sequence[int]],
axis: int) -> Optional[np.ndarray]:
"""Returns a mask which is a concatenation of `masks`.
Since elements of `masks` can have any shapes broadcastable to respective
elements of `input_shapes`, their concatenation may require broadcasting and
cannot be done with a single `np.concatenate` call.
Args:
masks: list of masks to concatenate.
input_shapes: list of input shapes to which the masks are applied.
axis: concatenation axis.
Returns:
A single `np.ndarray` mask applicable to the concatenated inputs.
"""
if len(masks) != len(input_shapes):
raise ValueError(f'Number of masks ({len(masks)}) and inputs '
f'({len(input_shapes)}) don\'t match, please file a bug at'
f' https://github.com/google/neural-tangents/issues/new.')
if all(m is None for m in masks):
return None
axis %= len(input_shapes[0])
# Expand the concatenation dimension of each mask.
masks = [m if m is None else np.broadcast_to(
m,
(m.shape[:axis] +
tuple(input_shapes[i][axis: axis + 1]) +
m.shape[axis + 1:]))
for i, m in enumerate(masks)]
# Max shape to broadcast all masks to along non-concat dimension.
max_shape = _map_tuples(max, (m.shape for m in masks if m is not None))
# Shape of the mask to replace `None` masks with.
max_shapes = [tuple(map(min, max_shape, i)) for i in input_shapes]
masks = [
(np.broadcast_to(
m,
max_shape[:axis] + m.shape[axis: axis + 1] + max_shape[axis + 1:])
if m is not None
else np.zeros_like(max_shapes[i], dtype=np.bool_))
for i, m in enumerate(masks)
]
return np.concatenate(masks, axis)
def _preprocess_kernels_for_fan_in(ks: Kernels) -> Tuple[List[Kernel], bool]:
  """Validate and align input kernels before a `FanIn` layer.

  Verifies all kernels share the same covariance layout, then flips the
  spatial-axis ordering of the minority so every kernel agrees (majority vote,
  minimizing the number of `reverse()` calls). Also warns about the unverified
  independence assumption.

  Returns:
    A `(kernels, is_reversed)` tuple with all kernels in the same ordering.
  """
  def _layout(k: Kernel):
    # The attributes that must agree for covariances to be combinable.
    return (k.diagonal_batch, k.diagonal_spatial, k.batch_axis, k.channel_axis)

  # Lazily compares against `ks[0]` so an empty `ks` passes vacuously.
  if any(_layout(k) != _layout(ks[0]) for k in ks[1:]):
    raise NotImplementedError('`FanIn` layers are only implemented for the '
                              'case if all input layers output the same layout '
                              'of covariance matrices, i.e. having all '
                              'matching `diagonal_batch` and '
                              '`diagonal_spatial` and other attributes.')
  ks = list(ks)
  is_reversed = sum(k.is_reversed for k in ks) > len(ks) / 2
  ks = [k if k.is_reversed == is_reversed else k.reverse() for k in ks]
  warnings.warn('`FanIn` layers assume independent inputs which is not verified'
                ' in the code. Please make sure to have at least one `Dense` / '
                '`Conv` / `GlobalSelfAttention` etc. layer in each branch.')
  return ks, is_reversed
def _concat_kernels(
    mats: Sequence[Optional[np.ndarray]],
    axis: int,
    diagonal_batch: bool,
    diagonal_spatial: bool,
    widths: Sequence[int]) -> Optional[np.ndarray]:
  """Compute the covariance of concatenated activations with given covariances.
  Args:
    mats: Covariance tensors of the same shape.
    axis: Specifies the axis along which the covariances (not activations) are
      concatenated. `-1` corresponds to averaging.
    diagonal_batch: Specifies whether `cov1` and `cov2` store only
      the diagonal of the sample-sample covariance
      (`diagonal_batch == True`,
       `cov1.shape == (batch_size_1, ...)`),
      or the full covariance
      (`diagonal_batch == False`,
       `cov1.shape == (batch_size_1, batch_size_1, ...)`).
    diagonal_spatial: Specifies whether only the diagonals of the
      location-location covariances will be computed,
      (`diagonal_spatial == True`,
       `nngp.shape == (batch_size_1, batch_size_2, height, width, depth, ...)`),
      or the full covariance
      (`diagonal_spatial == False`,
       `nngp.shape == (batch_size_1, batch_size_2, height, height,
                       width, width, depth, depth, ...)`).
    widths: list of integer channel widths of the finite model inputs.
  Returns:
    A new `np.ndarray` representing covariance between concatenated activations.
  """
  # All `mats` are either present or all absent (e.g. no NTK requested).
  if mats[0] is None:
    return None
  n_mats = len(mats)
  mat_ndim = mats[0].ndim
  # Averaging if concatenating along features or diagonalized dimension.
  if axis == -1:
    # Channelwise concatenation: the covariance is the width-weighted average
    # of the input covariances; equal widths reduce to a plain mean.
    if all(w == widths[0] for w in widths):
      widths = [1] * len(widths)
    mat = sum(mats[i] * widths[i] for i in range(n_mats)) / sum(widths)
  # Simple concatenation along the axis if the axis is not duplicated.
  elif ((axis == 0 and diagonal_batch) or
        (axis != 0 and diagonal_spatial)):
    # The diagonalized axis appears once in the covariance tensor, so a plain
    # concatenation suffices (offset by 1 to skip the extra batch dimension
    # when the batch covariance is stored in full).
    concat_axis = axis + (0 if diagonal_batch else 1)
    mat = np.concatenate(mats, concat_axis)
  # 2D concatenation with insertion of 0-blocks if the axis is present twice.
  else:
    rows = []
    # The two copies of `axis` in the covariance tensor sit at `pad_axis` and
    # `pad_axis + 1`.
    pad_axis = max(0, 2 * axis - (1 if diagonal_batch else 0))
    for i, mat in enumerate(mats):
      pads = [(0, 0)] * mat_ndim
      pads[pad_axis] = (
          sum(mats[j].shape[pad_axis] for j in range(i)),
          sum(mats[j].shape[pad_axis] for j in range(i + 1, n_mats))
      )
      # Pad each `mat` into its diagonal-block position; off-diagonal
      # (cross-input) blocks are zero under the independence assumption.
      rows.append(np.pad(mat, pads))
    mat = np.concatenate(rows, pad_axis + 1)
  return mat
| 16,445 | 33.623158 | 80 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/stax/elementwise.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elementwise nonlinearities / activation functions.
For details, please see "`Fast Neural Kernel Embeddings for General Activations
<https://arxiv.org/abs/2209.04121>`_".
"""
import functools
import operator as op
from typing import Callable, Optional, Sequence, Tuple
import warnings
import jax
from jax import custom_jvp, grad, vmap
from jax import numpy as np
from jax.scipy.special import erf
import numpy as onp
from .requirements import Diagonal, get_diagonal, get_diagonal_outer_prods, layer, requires, supports_masking
import scipy as osp
from ..utils import utils
from ..utils.kernel import Kernel
from ..utils.typing import InternalLayer, LayerKernelFn
@layer
@supports_masking(remask_kernel=True)
def Erf(
    a: float = 1.,
    b: float = 1.,
    c: float = 0.) -> InternalLayer:
  """Affine transform of `Erf` nonlinearity, i.e. `a * Erf(b * x) + c`.
  Args:
    a: output scale.
    b: input scale.
    c: output shift.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return a * erf(b * x) + c
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form kernel transformation for the `erf` nonlinearity."""
    # Fold the input scale `b` into the kernel, so the remaining math is for
    # plain `erf`; the output affine `a * (.) + c` is applied at the end.
    k *= b
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    cov1_denom = 1 + 2 * cov1
    cov2_denom = None if cov2 is None else 1 + 2 * cov2
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1_denom,
                                                      cov2_denom,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    factor = 2 / np.pi
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # Arcsine-type closed form for the Gaussian expectation of `erf`
      # products; `dot_sigma` is the matching derivative-covariance scaling
      # applied to the NTK.
      square_root = _sqrt(prod - 4 * nngp**2)
      nngp = factor * np.arctan2(2 * nngp, square_root)
      if ntk is not None:
        dot_sigma = 2 * factor / square_root
        ntk *= dot_sigma
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal specialization of `nngp_ntk_fn` (here `prod == (1 + 2q)^2`).
      return factor * np.arctan2(nngp, np.sqrt(nngp + 1. / 4))
    nngp, ntk = nngp_ntk_fn(nngp, prod12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22)
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    # Apply the output affine transform `a * erf(.) + c`.
    return a * k + c
  return _elementwise(fn, f'Erf({a}, {b}, {c})', kernel_fn)
def Sigmoid_like():
  """A sigmoid like function `f(x) = .5 * erf(x / 2.4020563531719796) + .5`.
  The constant `2.4020563531719796` is chosen so that the squared loss between
  this function and the ground truth sigmoid is minimized on the interval
  `[-5, 5]`; see
  https://gist.github.com/SiuMath/679e8bb4bce13d5f2383a27eca649575.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # Delegate to the affine-`Erf` layer with the fitted input scale.
  input_scale = 1 / 2.4020563531719796
  return Erf(a=0.5, b=input_scale, c=0.5)
@layer
@supports_masking(remask_kernel=False)
def Gabor() -> InternalLayer:
  """Gabor function `exp(-x^2) * sin(x)`.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return np.exp(-x**2) * np.sin(x)
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form kernel transformation for the Gabor nonlinearity."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    # Outer products (`prod`) and sums (`sum`) of the diagonal variances,
    # used by the closed forms below.
    prod11, prod12, prod22 = get_diagonal_outer_prods(
        cov1, cov2, k.diagonal_batch, k.diagonal_spatial, op.mul)
    sum11, sum12, sum22 = get_diagonal_outer_prods(
        cov1, cov2, k.diagonal_batch, k.diagonal_spatial, op.add)
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        sum_: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # Closed-form Gaussian expectations of `exp(-x^2) sin(x)` products
      # (`t_dot` is the matching derivative covariance for the NTK).
      diff = 4 * (prod - nngp**2)
      denom = 2 * sum_ + diff + 1
      num = sum_ + diff + 2 * nngp
      exp_left = np.exp(-num / (2 * denom))
      exp_right = np.exp(2 * nngp / denom)
      if ntk is not None:
        shared_term = 1 + 2 * sum_ + 4 * (nngp**2 + prod)
        diff_term = 4 * nngp * (diff + 3 * sum_ + 2)
        lhs = shared_term - diff_term
        rhs = shared_term + diff_term
        t_dot = exp_left * (lhs + exp_right * rhs) / denom**(5. / 2)
        ntk *= t_dot / 2
      nngp = exp_left * (exp_right - 1) / (2 * _sqrt(denom))
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal specialization of the formula above.
      denom = 1 + 4 * nngp
      return (1 - np.exp(-2 * nngp / denom)) / (2 * _sqrt(denom))
    nngp, ntk = nngp_ntk_fn(nngp, prod12, sum12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11, sum11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22, sum22)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, 'Gabor', kernel_fn)
@layer
@supports_masking(remask_kernel=False)
def Gelu(
    approximate: bool = False) -> InternalLayer:
  """Gelu function.
  Args:
    approximate:
      only relevant for finite-width network, `apply_fn`. If `True`, computes
      an approximation via `tanh`, see "`Gaussian Error Linear Units (GELUs)
      <https://arxiv.org/abs/1606.08415>`_" and :obj:`jax.nn.gelu` for details.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass; `approximate` only affects this path, not
    # the closed-form `kernel_fn` below.
    return jax.nn.gelu(x, approximate=approximate)
  def kernel_fn(k: Kernel) -> Kernel:
    """Compute kernels after a `Gelu` layer.
    For NNGP see "`Avoiding Kernel Fixed Points: Computing with ELU and GELU
    Infinite Networks <https://arxiv.org/abs/2002.08517>`_".
    """
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    cov1_plus_1 = cov1 + 1
    cov2_plus_1 = None if cov2 is None else cov2 + 1
    prod11_plus_1, prod12_plus_1, prod22_plus_1 = get_diagonal_outer_prods(
        cov1_plus_1, cov2_plus_1, k.diagonal_batch, k.diagonal_spatial, op.mul)
    prod11, prod12, prod22 = get_diagonal_outer_prods(
        cov1, cov2, k.diagonal_batch, k.diagonal_spatial, op.mul)
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        prod_plus_1: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # Closed-form NNGP / NTK update; `dot_sigma` is the derivative
      # covariance the NTK is rescaled by.
      delta_squared = prod_plus_1 - nngp**2
      delta = _sqrt(delta_squared)
      angles = np.arctan2(nngp, delta)
      new_nngp = (nngp**2 + prod * delta_squared) / (prod_plus_1 * delta)
      new_nngp += nngp * angles
      new_nngp /= 2 * np.pi
      new_nngp += 0.25 * nngp
      if ntk is not None:
        second_term = 0.25 + angles / (2 * np.pi)
        first_term = 1 / delta_squared + (1 - prod) / prod_plus_1 + 1
        first_term *= nngp / delta / (2. * np.pi)
        dot_sigma = first_term + second_term
        ntk *= dot_sigma
      return new_nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal specialization of `nngp_ntk_fn`.
      square_root = np.sqrt(1. + 2. * nngp)
      new_nngp = nngp / ((nngp + 1.) * np.sqrt(1. + 2. * nngp))
      new_nngp += np.arctan2(nngp, square_root) / 2
      new_nngp /= np.pi
      new_nngp += 0.25
      new_nngp *= nngp
      return new_nngp
    nngp, ntk = nngp_ntk_fn(nngp, prod12, prod12_plus_1, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11, prod11_plus_1)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22, prod22_plus_1)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, 'Gelu', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def Sin(
    a: float = 1.,
    b: float = 1.,
    c: float = 0.) -> InternalLayer:
  """Affine transform of `Sin` nonlinearity, i.e. `a sin(b*x + c)`.
  Args:
    a: output scale.
    b: input scale.
    c: input phase shift.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return a * np.sin(b * x + c)
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form kernel transformation for `a sin(b*x + c)`."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    # Outer sums of the diagonal variances, used by the closed forms below.
    sum11, sum12, sum22 = get_diagonal_outer_prods(cov1,
                                                   cov2,
                                                   k.diagonal_batch,
                                                   k.diagonal_spatial,
                                                   op.add)
    half_a_square = a**2 / 2.
    def nngp_ntk_fn(nngp, sum_, ntk=None):
      # Gaussian expectations of sine products via the characteristic
      # function; `s1`/`s2` are the two exponential terms.
      s1 = np.exp(b ** 2 * (-0.5 * sum_ + nngp))
      s2 = np.exp(b ** 2 * (-0.5 * sum_ - nngp)) * np.cos(2 * c)
      nngp = half_a_square * (s1 - s2)
      if ntk is not None:
        ntk *= half_a_square * b**2 * (s1 + s2)
      return nngp, ntk
    def nngp_fn_diag(nngp):
      # Diagonal specialization (`sum_ == 2 * nngp`).
      return half_a_square * (1. - np.exp(-2 * b**2 * nngp) * np.cos(2 * c))
    nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, sum11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, sum22)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, f'Sin({a}, {b}, {c})', kernel_fn)
def Cos(
    a: float = 1.,
    b: float = 1.,
    c: float = 0.) -> InternalLayer:
  """Affine transform of `Cos` nonlinearity, i.e. `a cos(b*x + c)`.
  Implemented via the identity `cos(z) = sin(z + pi/2)` by delegating to
  :obj:`Sin` with the phase advanced by `pi/2`.
  Args:
    a: output scale.
    b: input scale.
    c: input phase shift.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  shifted_phase = c + np.pi / 2
  return Sin(a=a, b=b, c=shifted_phase)
@layer
@supports_masking(remask_kernel=True)
def Rbf(
    gamma: float = 1.0) -> InternalLayer:
  """Dual activation function for normalized RBF or squared exponential kernel.
  Dual activation function is `f(x) = sqrt(2)*sin(sqrt(2*gamma) x + pi/4)`.
  NNGP kernel transformation correspond to (with input dimension `d`)
  `k = exp(- gamma / d * ||x - x'||^2) = exp(- gamma*(q11 + q22 - 2 * q12))`.
  Args:
    gamma:
      related to characteristic length-scale (l) that controls width of the
      kernel, where `gamma = 1 / (2 l^2)`.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width dual activation of the RBF kernel.
    return np.sqrt(2) * np.sin(np.sqrt(2 * gamma) * x + np.pi/4)
  def kernel_fn(k: Kernel) -> Kernel:
    """Compute new kernels after an `Rbf` layer."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    # Outer sums of the diagonal variances (`q11 + q22` in the docstring).
    sum11, sum12, sum22 = get_diagonal_outer_prods(cov1,
                                                   cov2,
                                                   k.diagonal_batch,
                                                   k.diagonal_spatial,
                                                   op.add)
    def nngp_ntk_fn(nngp, sum_, ntk):
      # `exp(-gamma * (q11 + q22 - 2 q12))` — the normalized RBF kernel.
      nngp = np.exp(gamma * (-sum_ + 2 * nngp))
      if ntk is not None:
        ntk *= 2 * gamma * nngp
      return nngp, ntk
    def nngp_fn_diag(nngp):
      # The normalized RBF kernel has unit diagonal.
      return np.ones_like(nngp)
    nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, sum11, None)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, sum22, None)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, f'Rbf({gamma})', kernel_fn)
@layer
@supports_masking(remask_kernel=False)
def ABRelu(
    a: float,
    b: float,
    do_stabilize: bool = False) -> InternalLayer:
  """ABReLU nonlinearity, i.e. `a * min(x, 0) + b * max(x, 0)`.
  Args:
    a: slope for `x < 0`.
    b: slope for `x > 0`.
    do_stabilize: set to `True` for very deep networks.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass: slope `a` on the negative half-line, `b` on
    # the positive one.
    return a * np.minimum(x, 0) + b * np.maximum(x, 0)
  def kernel_fn(k: Kernel) -> Kernel:
    """Compute new kernels after an `ABRelu` layer.
    See "`Invariance of Weight Distributions in Rectified MLPs
    <https://arxiv.org/abs/1711.09090>`_" for the leaky ReLU derivation.
    """
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    if do_stabilize:
      # Rescale covariances to unit magnitude to avoid overflow/underflow in
      # very deep networks; the scale is restored at the end (ABReLU kernels
      # are 1-homogeneous in the input covariance).
      factor = np.maximum(np.max(np.abs(nngp)), 1e-12)
      nngp /= factor
      cov1 /= factor
      if cov2 is not None:
        cov2 /= factor
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1,
                                                      cov2,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    def nngp_ntk_fn(nngp, prod, ntk=None):
      # Arc-cosine-kernel-type closed form; `angles` is the angle between the
      # two pre-activations, `dot_sigma` the derivative covariance.
      square_root = _sqrt(prod - nngp**2)
      angles = _arctan2(square_root, nngp, fill_zero=np.pi / 2)
      factor = (a - b)**2 / (2 * np.pi)
      dot_sigma = (a**2 + b**2) / 2 - factor * angles
      nngp = factor * square_root + dot_sigma * nngp
      if ntk is not None:
        ntk *= dot_sigma
      return nngp, ntk
    def nngp_fn_diag(nngp):
      # Diagonal specialization (`angles == 0`, `square_root == 0`).
      return (a**2 + b**2) / 2 * nngp
    nngp, ntk = nngp_ntk_fn(nngp, prod12, ntk=ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22)
    if do_stabilize:
      # Undo the stabilization rescaling.
      nngp *= factor
      cov1 *= factor
      if cov2 is not None:
        cov2 *= factor
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, f'ABReLU({a}, {b})', kernel_fn)
def Relu(
    do_stabilize: bool = False) -> InternalLayer:
  """ReLU nonlinearity.
  A special case of :obj:`ABRelu` with slope `0` on the negative half-line and
  slope `1` on the positive half-line.
  Args:
    do_stabilize: set to `True` for very deep networks.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return ABRelu(a=0, b=1, do_stabilize=do_stabilize)
def LeakyRelu(
    alpha: float,
    do_stabilize: bool = False) -> InternalLayer:
  """Leaky ReLU nonlinearity, i.e. `alpha * min(x, 0) + max(x, 0)`.
  A special case of :obj:`ABRelu` with slope `alpha` on the negative half-line
  and slope `1` on the positive half-line.
  Args:
    alpha: slope for `x < 0`.
    do_stabilize: set to `True` for very deep networks.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return ABRelu(a=alpha, b=1, do_stabilize=do_stabilize)
def Abs(
    do_stabilize: bool = False) -> InternalLayer:
  """Absolute value nonlinearity.
  A special case of :obj:`ABRelu` with slope `-1` on the negative half-line
  and slope `1` on the positive half-line.
  Args:
    do_stabilize: set to `True` for very deep networks.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return ABRelu(a=-1, b=1, do_stabilize=do_stabilize)
@layer
@supports_masking(remask_kernel=False)
def Sign() -> InternalLayer:
  """Sign function.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return np.sign(x)
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form kernel transformation for `sign`."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    if ntk is not None:
      # `sign` has zero derivative almost everywhere, so the NTK vanishes.
      ntk = np.zeros_like(ntk)
    _, prod12, _ = get_diagonal_outer_prods(cov1,
                                            cov2,
                                            k.diagonal_batch,
                                            k.diagonal_spatial,
                                            op.mul)
    # Arc-cosine-type form: NNGP depends only on the angle between the two
    # pre-activations.
    angles = _arctan2(_sqrt(prod12 - nngp**2), nngp, fill_zero=np.pi / 2)
    nngp = 1 - angles * 2 / np.pi
    # `sign(x)^2 == 1` wherever the variance is nonzero, so the diagonal
    # covariances become 0/1 indicators.
    cov1 = np.where(cov1 == 0., 0., 1.)
    cov2 = cov2 if cov2 is None else np.where(cov2 == 0, 0., 1.)
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    return k
  return _elementwise(fn, 'Sign', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def Exp(a: float = 1, b: float = 1) -> InternalLayer:
  """Elementwise natural exponent function `a * np.exp(b * x)`.
  Args:
    a: output scale.
    b: input scale.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return a * np.exp(b * x)
  def kernel_fn(k: Kernel) -> Kernel:
    """Compute new kernels after an `Exp` layer."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    # Outer sums of the diagonal variances, used by the log-normal moments
    # below.
    sum11, sum12, sum22 = get_diagonal_outer_prods(
        cov1, cov2, k.diagonal_batch, k.diagonal_spatial, op.add)
    def nngp_ntk_fn(nngp, sum_, ntk):
      # Gaussian moment-generating function:
      # E[e^{bu} e^{bv}] = exp(b^2 ((var_u + var_v) / 2 + cov_{uv})).
      nngp = np.exp(b**2 * (sum_ / 2 + nngp))
      if ntk is not None:
        ntk *= b**2 * nngp
      return nngp, ntk
    def nngp_fn_diag(nngp):
      # Diagonal specialization (`sum_ == 2 * nngp`).
      return np.exp(2 * b**2 * nngp)
    nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, sum11, None)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, sum22, None)
    # The output scale `a` enters the kernel quadratically (`* a**2` is
    # implemented by the `Kernel.__mul__` via `* a`... see `Kernel` API).
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk) * a
  return _elementwise(fn, f'Exp({a}, {b})', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def Gaussian(a: float = 1, b: float = -1) -> InternalLayer:
  """Elementwise Gaussian function `a * np.exp(b * x**2)`.
  Args:
    a: output scale.
    b: exponent coefficient (negative, the default `-1`, gives a Gaussian
      bump).
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  def fn(x):
    # Finite-width forward pass.
    return a * np.exp(b * x**2)
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form kernel transformation for `a exp(b x^2)`."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    cov1_denom = 1 - 2 * b * cov1
    cov2_denom = None if cov2 is None else 1 - 2 * b * cov2
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1_denom,
                                                      cov2_denom,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    factor = 4 * b**2
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # Gaussian-integral closed form: `det` is the square root of the 2x2
      # covariance determinant of the transformed pair.
      det = _sqrt((prod - factor * nngp**2))
      if ntk is not None:
        ntk *= factor * nngp / det**3
      nngp = 1 / det
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal specialization of the formula above.
      return 1 / _sqrt(1 - 4 * b * nngp)
    nngp, ntk = nngp_ntk_fn(nngp, prod12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22)
    # Apply the output scale `a` to the whole kernel.
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk) * a
  return _elementwise(fn, f'Gaussian({a}, {b})', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def ExpNormalized(
    gamma: float = 1,
    shift: float = -1,
    do_clip: bool = False) -> InternalLayer:
  """Simulates the "Gaussian normalized kernel".
  See page 6 in
  "`Neural Kernels Without Tangents <https://arxiv.org/abs/2003.02237>`_".
  Args:
    gamma: exponent scalar coefficient.
    shift: shift exponentiated normalized covariance by this much.
    do_clip: True to clip normalized covariance, potentially improving accuracy.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  Raises:
    NotImplementedError: if finite width `apply_fn` is called.
  """
  # No finite-width counterpart exists: `fn=None` makes `apply_fn` raise.
  def kernel_fn(k: Kernel) -> Kernel:
    cov1, cov2, nngp, ntk = k.cov1, k.cov2, k.nngp, k.ntk
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1,
                                                      cov2,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    # `prod` terms become products of standard deviations; `tol` guards the
    # square root against zero variances.
    tol = 1e-30
    prod11 = _sqrt(prod11, tol)
    prod12 = _sqrt(prod12, tol)
    prod22 = _sqrt(prod22, tol) if prod22 is not None else None
    def exp(cov, prod):
      # Normalize to a correlation, optionally clip to `[-1, 1]`, then
      # exponentiate with scale `gamma` and offset `shift`.
      if cov is not None:
        cov /= prod
        if do_clip:
          cov = np.clip(cov, -1, 1)
        cov = np.exp(gamma * (cov + shift))
      return cov
    exp12 = exp(nngp, prod12)
    # Multiply back by the std-dev products to restore the original scale.
    return k.replace(
        nngp=prod12 * exp12,
        cov1=prod11 * exp(cov1, prod11),
        cov2=None if cov2 is None else prod22 * exp(cov2, prod22),
        ntk=ntk if ntk is None else gamma * ntk * exp12)
  return _elementwise(None, 'ExpNormalized', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def Hermite(degree: int) -> InternalLayer:
  """Hermite polynomials.
  Inputs to this layer are assumed to have unit norm, i.e.
  `np.std(x, axis=channel_axis) == 1`. The Hermite polynomials are normalized
  so that the L2 norm w.r.t. standard Gaussian is 1.
  Args:
    degree: a non-negative integer.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  if degree < 0:
    raise NotImplementedError('`degree` must be a non-negative integer.')
  # Coefficients of the probabilists' Hermite polynomial He_degree, converted
  # to the standard power basis (reversed into descending order for
  # `np.polyval`).
  p = onp.polynomial.hermite_e.herme2poly([0] * degree + [1])[::-1]
  # `sqrt(degree!)` — normalizes He_degree to unit L2 norm w.r.t. the
  # standard Gaussian.
  coeff = functools.reduce(op.mul, range(1, degree + 1), 1)**0.5
  def fn(x):
    # Finite-width forward pass: normalized Hermite polynomial of `x`.
    return np.polyval(p, x) / coeff
  def kernel_fn(k: Kernel) -> Kernel:
    warnings.warn(
        'Inputs to this layer are assumed to have unit norm across '
        ' channels/features, i.e. np.std(x, axis=channel_axis) == 1.')
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    if ntk is not None:
      if degree == 0:
        # Constant polynomial — zero derivative, hence zero NTK.
        ntk = np.zeros_like(ntk)
      else:
        # Chain rule: d/dq of q^degree.
        ntk = degree * nngp**(degree - 1) * ntk
    # Under the unit-variance assumption the normalized Hermite dual kernel
    # is simply the input covariance raised to `degree`.
    def _power(mat):
      return mat**degree if mat is not None else None
    nngp, cov1, cov2 = map(_power, (nngp, cov1, cov2))
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    return k
  return _elementwise(fn, f'{degree}-Hermite polynomial', kernel_fn)
@layer
@supports_masking(remask_kernel=False)
def Monomial(degree: int) -> InternalLayer:
  """Monomials, i.e. `x^degree`.
  Args:
    degree: an integer between 0 and 5.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  if degree not in [0, 1, 2, 3, 4, 5]:
    raise NotImplementedError('The `degree` must be an integer between '
                              '`0` and `5`.')
  def fn(x):
    # Finite-width forward pass.
    return x**degree
  def kernel_fn(k: Kernel) -> Kernel:
    """Closed-form Gaussian moments of monomial products (degrees 0-5)."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1,
                                                      cov2,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # Hard-coded Gaussian moments E[u^d v^d] as polynomials in the
      # covariance (`nngp`) and the variance product (`prod`).
      def nngp_fn(nngp: np.ndarray, degree: int) -> np.ndarray:
        # `degree == -1` only arises as `degree - 1` for the NTK at
        # `degree == 0`, where the result is multiplied by zero anyway.
        if degree == -1:
          nngp = np.zeros_like(nngp)
        elif degree == 0:
          nngp = np.ones_like(nngp)
        elif degree == 1:
          pass
        elif degree == 2:
          nngp = 2 * nngp ** 2 + prod
        elif degree == 3:
          nngp = 6 * nngp ** 3 + 9 * nngp * prod
        elif degree == 4:
          nngp = 3 * (8 * nngp ** 4 + 3 * prod * (8 * nngp ** 2 + prod))
        elif degree == 5:
          nngp = 15 * nngp * (
              8 * nngp ** 4 + 5 * prod * (8 * nngp ** 2 + 3 * prod))
        else:
          raise NotImplementedError(degree)
        return nngp
      if ntk is not None:
        # Chain rule: derivative covariance is degree^2 * E[u^{d-1} v^{d-1}].
        ntk *= degree**2 * nngp_fn(nngp, degree - 1)
      nngp = nngp_fn(nngp, degree)
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal specialization: E[u^{2d}] = (2d - 1)!! * var^d.
      return _double_factorial(2 * degree - 1) * nngp**degree
    nngp, ntk = nngp_ntk_fn(nngp, prod12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22)
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    return k
  return _elementwise(fn, f'{degree}-monomial', kernel_fn)
@layer
@supports_masking(remask_kernel=False)
def RectifiedMonomial(degree: int) -> InternalLayer:
  """Rectified monomials, i.e. `(x >= 0) * x^degree`.
  Args:
    degree: a non-negative integer power.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  if degree < 0:
    raise NotImplementedError('`degree` must be a non-negative integer.')
  def fn(x):
    # Finite-width forward pass.
    return (x >= 0) * x**degree
  def kernel_fn(k: Kernel) -> Kernel:
    """Arc-cosine-type kernel for rectified monomials."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1,
                                                      cov2,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    def j(nngp: np.ndarray, sqrt_prod: np.ndarray) -> np.ndarray:
      # Angular dependency of the kernel, built by repeatedly differentiating
      # `f0(theta) = (pi - theta) / sin(theta)` w.r.t. `cos(theta)` —
      # matches the `J_n` recursion of arc-cosine kernels
      # (cf. Cho & Saul, 2009 — TODO confirm exact correspondence).
      theta = np.arccos(nngp / sqrt_prod)
      def f0(theta: np.ndarray) -> np.ndarray:
        return (np.pi - theta) / np.sin(theta)
      def diff(f: Callable[[np.ndarray], np.ndarray]
               ) -> Callable[[np.ndarray], np.ndarray]:
        # d/d(cos theta) = (d/d theta) / (-sin theta); the sign is absorbed by
        # the `(-1)**degree` factor below.
        def df(theta: np.ndarray) -> np.ndarray:
          return np.vectorize(grad(f))(theta) / np.sin(theta)
        return df
      f = f0
      for _ in range(degree):
        f = diff(f)
      return (-1)**degree * (np.sin(theta))**(2 * degree + 1) * f(theta)
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      sqrt_prod = _sqrt(prod)
      # Radial part of the kernel: (std_u * std_v)^degree / (2 pi).
      coeff = sqrt_prod**degree / (2 * np.pi)
      if ntk is not None:
        if degree == 0:
          # The step function has zero derivative almost everywhere.
          ntk = np.zeros_like(ntk)
        else:
          j_dot = np.vectorize(grad(j))(nngp, sqrt_prod)
          ntk *= coeff * j_dot
      nngp = coeff * j(nngp, sqrt_prod)
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray) -> np.ndarray:
      # Diagonal: E[(u >= 0) u^{2d}] = (2d - 1)!! * var^d / 2 (half the full
      # even moment, by symmetry).
      return _double_factorial(2 * degree - 1) * nngp**degree / 2
    nngp, ntk = nngp_ntk_fn(nngp, prod12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22)
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    return k
  return _elementwise(fn, f'{degree}-rectified-monomial', kernel_fn)
@layer
@supports_masking(remask_kernel=False)
def Polynomial(coef: Sequence[float]) -> InternalLayer:
  """Polynomials, i.e. `coef[0] + coef[1] * x + … + coef[n] * x**n`.
  Args:
    coef:
      a sequence of coefficients. Follows
      :class:`numpy.polynomial.polynomial.Polynomial` API.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  coef = onp.array(coef)
  def fn(x):
    # `np.polyval` expects highest-degree-first coefficients, hence `[::-1]`.
    return np.polyval(coef[::-1], x)
  degree = len(coef)  # NOTE: number of coefficients, i.e. true degree + 1.
  def kernel_fn(k: Kernel) -> Kernel:
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    def r(n: Optional[np.ndarray], l: int) -> Optional[np.ndarray]:
      # Builds the `l`-th coefficient function `r_l(n)` (evaluated at the
      # variance `n`), combining the polynomial coefficients with factorial
      # weights — presumably the Hermite-expansion coefficients of `fn`
      # (see the "Fast Neural Kernel Embeddings" reference above).
      if n is None:
        return None
      coef_dict = {
          2 * i + l: coef[2 * i + l] * _factorial(2 * i + l) / (
              2**i * _factorial(i) * _factorial(l)**0.5)
          for i in range(0, (degree - 1 - l) // 2 + 1)
      }
      coef_l = onp.array(
          [coef_dict[i] if i in coef_dict else 0 for i in range(degree)])
      return np.polyval(coef_l[::-1], n**0.5)
    if degree == 0:
      # Empty coefficient list: no terms to sum over.
      rs11, rs12, rs22 = [], [], []
    else:
      # For each order `l`, outer products of `r_l` evaluated at the two
      # inputs' variances.
      rs11, rs12, rs22 = list(zip(*[
          get_diagonal_outer_prods(r(cov1, l),
                                   r(cov2, l),
                                   k.diagonal_batch,
                                   k.diagonal_spatial,
                                   op.mul)
          for l in range(degree)
      ]))
    prod11, prod12, prod22 = get_diagonal_outer_prods(cov1,
                                                      cov2,
                                                      k.diagonal_batch,
                                                      k.diagonal_spatial,
                                                      op.mul)
    def nngp_ntk_fn(
        nngp: np.ndarray,
        prod: np.ndarray,
        r_prods: Sequence[np.ndarray],
        ntk: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
      # `ratio` is the correlation coefficient; the output kernels are
      # polynomials in it with coefficients `r_prods`.
      ratio = nngp / _sqrt(prod)
      if ntk is not None:
        # Term-by-term derivative of the NNGP series w.r.t. `nngp`.
        t_dot = np.zeros_like(ntk)
        for l in range(1, degree):
          t_dot += l * r_prods[l] * ratio**(l - 1)
        ntk *= t_dot / _sqrt(prod)
      nngp = np.zeros_like(nngp)
      for l in range(degree):
        nngp += r_prods[l] * ratio ** l
      return nngp, ntk
    def nngp_fn_diag(nngp: np.ndarray,
                     r_prods: Sequence[np.ndarray]) -> np.ndarray:
      # Diagonal entries: `ratio == 1`, so the series reduces to a plain sum.
      out = np.zeros_like(nngp)
      for l in range(degree):
        out += r_prods[l]
      return out
    nngp, ntk = nngp_ntk_fn(nngp, prod12, rs12, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1, rs11)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2, rs22)
    else:
      cov1, _ = nngp_ntk_fn(cov1, prod11, rs11)
      if cov2 is not None:
        cov2, _ = nngp_ntk_fn(cov2, prod22, rs22)
    k = k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
    return k
  return _elementwise(fn, f'{coef}-polynomial', kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def Elementwise(
    fn: Optional[Callable[[float], float]] = None,
    nngp_fn: Optional[Callable[[float, float, float], float]] = None,
    d_nngp_fn: Optional[Callable[[float, float, float], float]] = None
) -> InternalLayer:
  """Elementwise application of `fn` using provided `nngp_fn`.
  Constructs a layer given only scalar-valued nonlinearity / activation
  `fn` and the 2D integral `nngp_fn`. NTK function is derived automatically in
  closed form from `nngp_fn`.
  If you cannot provide the `nngp_fn`, see :obj:`ElementwiseNumerical` to use
  numerical integration or `nt.monte_carlo.monte_carlo_kernel_fn` to use Monte
  Carlo sampling.
  If your function is implemented separately (e.g. `nt.stax.Relu` etc) it's best
  to use the custom implementation, since it uses symbolically simplified
  expressions that are more precise and numerically stable.
  For details, please see "`Fast Neural Kernel Embeddings for General
  Activations <https://arxiv.org/abs/2209.04121>`_".
  See Also:
    `examples/elementwise.py`.
  Example:
    >>> fn = jax.scipy.special.erf  # type: Callable[[float], float]
    >>> #
    >>> def nngp_fn(cov12: float, var1: float, var2: float) -> float:
    >>>   prod = (1 + 2 * var1) * (1 + 2 * var2)
    >>>   return np.arcsin(2 * cov12 / np.sqrt(prod)) * 2 / np.pi
    >>> #
    >>> # Use autodiff and vectorization to construct the layer:
    >>> _, _, kernel_fn_auto = stax.Elementwise(fn, nngp_fn)
    >>> #
    >>> # Use custom pre-derived expressions
    >>> # (should be faster and more numerically stable):
    >>> _, _, kernel_fn_stax = stax.Erf()
    >>> #
    >>> kernel_fn_auto(x1, x2) == kernel_fn_stax(x1, x2)  # usually `True`.
  Args:
    fn:
      a scalar-input/valued function `fn : R -> R`, the activation /
      nonlinearity. If `None`, invoking the finite width `apply_fn` will raise
      an exception.
    nngp_fn:
      a scalar-valued function
      `nngp_fn : (cov12, var1, var2) |-> E[fn(x_1) * fn(x_2)]`, where the
      expectation is over bivariate normal `x1, x2` with variances `var1`,
      `var2` and covariance `cov12`. Needed for both NNGP and NTK calculation.
      If `None`, invoking infinite width `kernel_fn` will raise an exception.
    d_nngp_fn:
      an optional scalar-valued function
      `d_nngp_fn : (cov12, var1, var2) |-> E[fn'(x_1) * fn'(x_2)]` with the same
      `x1, x2` distribution as in `nngp_fn`. If `None`, will be computed using
      automatic differentiation as `d_nngp_fn = d(nngp_fn)/d(cov12)`, which may
      lead to worse precision or numerical stability. `nngp_fn` and `d_nngp_fn`
      are used to derive the closed-form expression for the NTK.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  Raises:
    NotImplementedError: if a `fn`/`nngp_fn` is not provided, but `apply_fn`/
    `kernel_fn` is called respectively.
  """
  # The layer's display name: prefer the finite-width `fn`'s name, fall back
  # to `nngp_fn`'s; at least one of the two must be given.
  if fn is not None:
    name = fn.__name__
  elif nngp_fn is not None:
    name = nngp_fn.__name__
  else:
    raise ValueError('No finite (`fn`) or infinite (`nngp_fn`) functions '
                     'provided, the layer will not do anything.')
  if nngp_fn is None:
    kernel_fn = None
  else:
    if d_nngp_fn is None:
      url = 'https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where'
      warnings.warn(
          f'Using JAX autodiff to compute the `fn` derivative for NTK. Beware '
          f'of {url}.')
      # NTK derivative rule (see docstring): `d_nngp_fn = d(nngp_fn)/d(cov12)`.
      d_nngp_fn = np.vectorize(grad(nngp_fn))
    def kernel_fn(k: Kernel) -> Kernel:
      cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
      var1 = get_diagonal(cov1, k.diagonal_batch, k.diagonal_spatial)
      var2 = get_diagonal(cov2, k.diagonal_batch, k.diagonal_spatial)
      # Apply the scalar integrals elementwise over all kernel layouts.
      if ntk is not None:
        ntk *= _vmap_2d(d_nngp_fn, nngp, var1, var2, False, k.diagonal_spatial)
      nngp = _vmap_2d(nngp_fn, nngp, var1, var2, False, k.diagonal_spatial)
      cov1 = _vmap_2d(
          nngp_fn, cov1, var1, None, k.diagonal_batch, k.diagonal_spatial)
      if cov2 is not None:
        cov2 = _vmap_2d(
            nngp_fn, cov2, var2, None, k.diagonal_batch, k.diagonal_spatial)
      return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, name, kernel_fn)
@layer
@supports_masking(remask_kernel=True)
def ElementwiseNumerical(
    fn: Callable[[float], float],
    deg: int,
    df: Optional[Callable[[float], float]] = None) -> InternalLayer:
  """Activation function using numerical integration.
  Supports general activation functions using Gauss-Hermite quadrature.
  For details, please see "`Fast Neural Kernel Embeddings for General
  Activations <https://arxiv.org/abs/2209.04121>`_".
  See Also:
    `examples/elementwise_numerical.py`.
  Args:
    fn:
      activation function.
    deg:
      number of sample points and weights for quadrature. It must be >= 1.
      We observe for smooth activations `deg=25` is a good place to start.
      For non-smooth activation functions (e.g. ReLU, Abs) quadrature is not
      recommended (for now use `nt.monte_carlo_kernel_fn`). Due to bivariate
      integration, compute time and memory scale as O(deg**2) for more
      precision. See eq (13) in
      https://mathworld.wolfram.com/Hermite-GaussQuadrature.html
      for error estimates in the case of 1d Gauss-Hermite quadrature.
    df:
      optional, derivative of the activation function (`fn`). If not provided,
      it is computed by `jax.grad`. Providing analytic derivative can speed up
      the NTK computations.
  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # Fixed: the implicitly-concatenated literals below were missing separating
  # spaces, producing e.g. "used!Note" and "giventolerance" in the warning.
  warnings.warn(
      f'Numerical Activation Layer with fn={fn}, deg={deg} used! '
      'Note that numerical error is controlled by `deg` and for a given '
      'tolerance level, required `deg` will highly be dependent on the choice '
      'of `fn`.')
  # Gauss-Hermite nodes and weights, shared by all the closures below.
  quad_points = osp.special.roots_hermite(deg)
  if df is None:
    url = 'https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where'
    warnings.warn(
        f'Using JAX autodiff to compute the `fn` derivative for NTK. Beware of '
        f'{url}.')
    df = np.vectorize(grad(fn))
  def kernel_fn(k: Kernel) -> Kernel:
    """Kernel transformation of activation function using quadrature."""
    cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
    d1 = get_diagonal(cov1, k.diagonal_batch, k.diagonal_spatial)
    d2 = get_diagonal(cov2, k.diagonal_batch, k.diagonal_spatial)
    end_axis = 1 if k.diagonal_spatial else cov1.ndim
    # Broadcast the two diagonals against each other (rows vs columns).
    q11 = utils.interleave_ones(d1, 0, end_axis, True)
    q22 = utils.interleave_ones(d1 if d2 is None else d2, 0, end_axis, False)
    def nngp_ntk_fn(nngp, q11, q22, ntk=None):
      """Simple Gauss-Hermite quadrature routine."""
      xs, ws = quad_points
      grid = np.outer(ws, ws)
      # Quadrature nodes broadcast over two leading integration axes.
      x = xs.reshape((xs.shape[0],) + (1,) * (nngp.ndim + 1))
      y = xs.reshape((1, xs.shape[0]) + (1,) * nngp.ndim)
      xy_axes = (0, 1)
      nngp = np.expand_dims(nngp, xy_axes)
      q11, q22 = np.expand_dims(q11, xy_axes), np.expand_dims(q22, xy_axes)
      def integrate(f):
        # Change of variables mapping the (x, y) grid to a pair of Gaussians
        # with variances `q11`, `q22` and covariance `nngp`.
        fvals = f(_sqrt(2 * q11) * x) * f(
            nngp / _sqrt(q11 / 2, 1e-30) * x + _sqrt(
                2*(q22 - nngp**2/q11)) * y)
        return np.tensordot(grid, fvals, (xy_axes, xy_axes)) / np.pi
      if ntk is not None:
        ntk *= integrate(df)
      nngp = integrate(fn)
      return nngp, ntk
    def nngp_fn_diag(nngp):
      # 1D quadrature suffices on the diagonal (single Gaussian variable).
      xs, ws = quad_points
      x = xs.reshape((xs.shape[0],) + (1,) * nngp.ndim)
      x_axes = (0,)
      nngp = np.expand_dims(nngp, x_axes)
      fval = fn(_sqrt(2 * nngp) * x) ** 2
      return np.tensordot(ws, fval, (x_axes, x_axes)) / np.sqrt(np.pi)
    nngp, ntk = nngp_ntk_fn(nngp, q11, q22, ntk)
    if k.diagonal_batch and k.diagonal_spatial:
      cov1 = nngp_fn_diag(cov1)
      if cov2 is not None:
        cov2 = nngp_fn_diag(cov2)
    else:
      start_axis = 1 if k.diagonal_batch else 0
      q11 = utils.interleave_ones(d1, start_axis, end_axis, True)
      q22 = utils.interleave_ones(d1, start_axis, end_axis, False)
      cov1, _ = nngp_ntk_fn(cov1, q11, q22)
      if cov2 is not None:
        q11 = utils.interleave_ones(d2, start_axis, end_axis, True)
        q22 = utils.interleave_ones(d2, start_axis, end_axis, False)
        cov2, _ = nngp_ntk_fn(cov2, q11, q22)
    return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  return _elementwise(fn, f'ElementwiseNumerical({fn},deg={deg})', kernel_fn)
def _elementwise(
    fn: Optional[Callable[[float], float]],
    name: str,
    kernel_fn: Optional[LayerKernelFn],
) -> InternalLayer:
  """Assemble an `(init_fn, apply_fn, kernel_fn)` layer triple for an
  elementwise nonlinearity from the scalar `fn` and the kernel map
  `kernel_fn`; all three returned functions are renamed to `name`.
  """
  def init_fn(rng, input_shape):
    # Elementwise layers are parameterless and shape-preserving.
    return input_shape, ()
  def apply_fn(params, inputs, **kwargs):
    if fn is None:
      raise NotImplementedError(fn)
    return fn(inputs)
  @requires(diagonal_spatial=Diagonal())
  def new_kernel_fn(k: Kernel, **kwargs) -> Kernel:
    if kernel_fn is None:
      raise NotImplementedError(kernel_fn)
    if not k.is_gaussian:
      raise ValueError('The input to the activation function must be Gaussian, '
                       'i.e. a random affine transform is required before the '
                       'activation function.')
    # After a nonlinearity the layer outputs are no longer Gaussian.
    return kernel_fn(k).replace(is_gaussian=False)
  init_fn.__name__ = apply_fn.__name__ = new_kernel_fn.__name__ = name
  return init_fn, apply_fn, new_kernel_fn
@functools.partial(custom_jvp, nondiff_argnums=(1,))
def _sqrt(x, tol=0.):
return np.sqrt(np.maximum(x, tol))
@getattr(_sqrt, 'defjvp', lambda f: f) # ReadTheDocs-friendly `@_sqrt.defjvp`.
def _sqrt_jvp(tol, primals, tangents):
x, = primals
x_dot, = tangents
safe_tol = max(tol, 1e-30)
square_root = _sqrt(x, safe_tol)
square_root_out = _sqrt(x, tol)
return square_root_out, np.where(x > safe_tol, x_dot / (2 * square_root), 0.)
@functools.partial(custom_jvp, nondiff_argnums=(2,))
def _arctan2(x, y, fill_zero: Optional[float] = None):
if fill_zero is not None:
return np.where(np.bitwise_and(x == 0., y == 0.),
fill_zero,
np.arctan2(x, y))
return np.arctan2(x, y)
@getattr(_arctan2, 'defjvp', lambda f: f) # Equivalent to `@_arctan2.defjvp`.
def _arctan2_jvp(fill_zero, primals, tangents):
x, y = primals
x_dot, y_dot = tangents
primal_out = _arctan2(x, y, fill_zero)
safe_tol = 1e-30
denom = np.maximum(x**2 + y**2, safe_tol)
tangent_out = x_dot * (y / denom) - y_dot * (x / denom)
return primal_out, tangent_out
def _vmap_2d(fn: Callable[[float, float, float], float],
             cov12: np.ndarray,
             var1: np.ndarray,
             var2: Optional[np.ndarray],
             diagonal_batch: bool,
             diagonal_spatial: bool) -> np.ndarray:
  """Effectively a "2D vmap" of `fn(cov12, var1, var2)`.
  Applicable for all possible kernel layouts.
  Args:
    fn:
      scalar-valued, elementwise `fn(cov12, var1, var2)` function to apply.
    cov12:
      covariance tensor (`q12`), `nngp`/`ntk`/`cov1`/`cov2`, of shape
      `(N1[, N2])`, `(N1[, N2], X, Y, ...)`, `(N1[, N2], X, X, Y, Y, ...)`
      depending on `diagonal_batch`, `diagonal_spatial`, and the number of
      spatial dimensions.
    var1:
      variance tensor (`q11`), has shape `(N1[, X, Y, ...])`.
    var2:
      variance tensor (`q22`), has shape `(N1[, X, Y, ...])`.
    diagonal_batch:
      `True` if `cov12` has only one batch dimension.
    diagonal_spatial:
      `True` if `cov12` has spatial dimensions appearing once (vs twice).
  Returns:
    Resulting array `[fn(cov12[i, j], var1[i], var2[j])]_{i j}`. Has the same
    shape as `cov12`.
  """
  batch_ndim = 1 if diagonal_batch else 2
  start = 2 - batch_ndim
  cov_end = batch_ndim if diagonal_spatial else cov12.ndim
  # Collapse the zipped (row, column) axes of `cov12` into a 2D matrix.
  _cov12 = utils.make_2d(cov12, start, cov_end)
  var_end = 1 if diagonal_spatial else var1.ndim
  # Flatten the variance tensors to line up with `_cov12`'s rows / columns.
  var1 = var1.reshape(var1.shape[:start] + (-1,) + var1.shape[var_end:])
  var2 = var1 if var2 is None else var2.reshape(var2.shape[:start] + (-1,) +
                                                var2.shape[var_end:])
  # Outer `vmap` pairs `cov12` rows with `var1`; inner `vmap` pairs columns
  # with `var2`; `np.vectorize` handles any remaining axes elementwise.
  fn = vmap(
      vmap(
          np.vectorize(fn),
          in_axes=(start, None, start),
          out_axes=start
      ),
      in_axes=(start, start, None),
      out_axes=start
  )
  out = fn(_cov12, var1, var2)  # type: np.ndarray
  # Undo the 2D flattening and re-interleave axes to the original layout.
  out_shape = (cov12.shape[:start] +
               cov12.shape[start:cov_end:2] +
               cov12.shape[start + 1:cov_end:2] +
               cov12.shape[cov_end:])
  out = out.reshape(out_shape)
  out = utils.zip_axes(out, start, cov_end)
  return out
def _factorial(n: int) -> int:
return functools.reduce(op.mul, range(1, n + 1), 1)
def _double_factorial(n: int) -> int:
return functools.reduce(op.mul, range(n, 0, -2), 1)
| 42,542 | 29.940364 | 109 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/utils/typing.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Type Definitions."""
from typing import Any, Dict, Generator, List, Optional, Sequence, TYPE_CHECKING, Tuple, TypeVar, Union
from jax import random
import jax.numpy as np
from .kernel import Kernel
from typing_extensions import Protocol
PyTree = Any
"""A PyTree, see `JAX docs`_ for details.
.. _JAX docs: https://jax.readthedocs.io/en/latest/pytrees.html
"""
Axes = Union[int, Sequence[int]]
"""Axes specification, can be integers (`axis=-1`) or sequences (`axis=(1, 3)`).
"""
# Generic leaf type parameter for the `NTTree` containers below.
T = TypeVar('T')
# NOTE: the recursive definitions are only visible to static type checkers;
# at runtime a flat (single-level) approximation is used instead.
if TYPE_CHECKING:
  NTTree = Union[List['NTTree[T]'], Tuple['NTTree[T]', ...], T]
  NTTrees = Union[List['NTTree[T]'], Tuple['NTTree[T]', ...]]
else:
  # Can't use recursive types with `sphinx-autodoc-typehints`.
  NTTree = Union[List[T], Tuple[T, ...], T]
"""Neural Tangents Tree.
Trees of kernels and arrays naturally emerge in certain neural
network computations (for example, when neural networks have nested parallel
layers).
Mimicking JAX, we use a lightweight tree structure called an :class:`NTTree`.
:class:`NTTree` has internal nodes that are either lists or tuples and leaves
which are either :class:`jax.numpy.ndarray` or
:class:`~neural_tangents.Kernel` objects.
"""
NTTrees = Union[List[T], Tuple[T, ...]]
"""A list or tuple of :class:`NTTree` s.
"""
Shapes = NTTree[Tuple[int, ...]]
"""A shape - a tuple of integers, or an :class:`NTTree` of such tuples.
"""
# Layer Definition.
class InitFn(Protocol):
  """A type alias for initialization functions.
  Initialization functions construct parameters for neural networks given a
  random key and an input shape. Specifically, they produce a tuple giving the
  output shape and a PyTree of parameters.
  """
  def __call__(
      self,
      rng: random.KeyArray,
      input_shape: Shapes,
      **kwargs
  ) -> Tuple[Shapes, PyTree]:
    """Returns the output shape and a PyTree of freshly sampled parameters."""
    ...
class ApplyFn(Protocol):
  """A type alias for apply functions.
  Apply functions do computations with finite-width neural networks. They are
  functions that take a PyTree of parameters and an array of inputs and produce
  an array of outputs.
  """
  def __call__(
      self,
      params: PyTree,
      inputs: NTTree[np.ndarray],
      *args,
      **kwargs
  ) -> NTTree[np.ndarray]:
    """Returns outputs of the finite-width network applied to `inputs`."""
    ...
class MaskFn(Protocol):
  """A type alias for a masking functions.
  Forward-propagate a mask in a layer of a finite-width network.
  """
  def __call__(
      self,
      mask: Union[np.ndarray, Sequence[np.ndarray]],
      input_shape: Shapes,
  ) -> Union[np.ndarray, Sequence[np.ndarray]]:
    """Returns the output mask given the input mask and input shape."""
    ...
# Kernel-function input: either already-computed `Kernel` object(s) or raw
# input array(s).
KernelOrInput = Union[NTTree[Kernel], NTTree[np.ndarray]]
# The `get` argument of kernel functions: a single kernel name, a tuple of
# names, or `None` (meaning "everything").
Get = Union[Tuple[str, ...], str, None]
class LayerKernelFn(Protocol):
  """A type alias for pure kernel functions.
  A pure kernel function takes a PyTree of Kernel object(s) and produces a
  PyTree of Kernel object(s). These functions are used to define new layer
  types.
  """
  def __call__(
      self,
      k: NTTree[Kernel]
  ) -> NTTree[Kernel]:
    """Returns the kernel(s) resulting from applying the layer to `k`."""
    ...
class AnalyticKernelFn(Protocol):
  """A type alias for analytic kernel functions.
  A kernel function that computes an analytic kernel. Takes either a
  :class:`~neural_tangents.Kernel` or :class:`jax.numpy.ndarray` inputs and a
  `get` argument that specifies what quantities should be computed by the
  kernel. Returns either a :class:`~neural_tangents.Kernel` object or
  :class:`jax.numpy.ndarray`-s for kernels specified by `get`.
  """
  def __call__(
      self,
      x1: KernelOrInput,
      x2: Optional[NTTree[np.ndarray]] = None,
      get: Get = None,
      **kwargs
  ) -> Union[NTTree[Kernel], NTTree[np.ndarray]]:
    """Returns the analytic kernel(s) between `x1` and `x2` given `get`."""
    ...
class EmpiricalGetKernelFn(Protocol):
  """A type alias for empirical kernel functions accepting a `get` argument.
  A kernel function that produces an empirical kernel from a single
  instantiation of a neural network specified by its parameters.
  Equivalent to `EmpiricalKernelFn`, but accepts a `get` argument, which can be
  for example `get=("nngp", "ntk")`, to compute both kernels together.
  """
  def __call__(
      self,
      x1: NTTree[np.ndarray],
      x2: Optional[NTTree[np.ndarray]],
      get: Get,
      params: PyTree,
      **kwargs
  ) -> NTTree[np.ndarray]:
    """Returns the empirical kernel(s) specified by `get` at `params`."""
    ...
class EmpiricalKernelFn(Protocol):
  """A type alias for empirical kernel functions computing either NTK or NNGP.
  A kernel function that produces an empirical kernel from a single
  instantiation of a neural network specified by its parameters.
  Equivalent to `EmpiricalGetKernelFn` with `get="nngp"` or `get="ntk"`.
  """
  def __call__(
      self,
      x1: NTTree[np.ndarray],
      x2: Optional[NTTree[np.ndarray]],
      params: PyTree,
      **kwargs
  ) -> NTTree[np.ndarray]:
    """Returns a single empirical kernel evaluated at `params`."""
    ...
class MonteCarloKernelFn(Protocol):
  """A type alias for Monte Carlo kernel functions.
  A kernel function that produces an estimate of an `AnalyticKernel`
  by monte carlo sampling given a `PRNGKey`.
  """
  def __call__(
      self,
      x1: NTTree[np.ndarray],
      x2: Optional[NTTree[np.ndarray]],
      get: Get = None,
      **kwargs
  ) -> Union[NTTree[np.ndarray], Generator[NTTree[np.ndarray], None, None]]:
    """Returns a Monte Carlo kernel estimate, or a generator of estimates."""
    ...
# Any of the kernel-function flavors defined above.
KernelFn = Union[
    AnalyticKernelFn,
    EmpiricalKernelFn,
    EmpiricalGetKernelFn,
    MonteCarloKernelFn,
]
# Layer triples: the internal variants return a `LayerKernelFn` operating on
# `Kernel` objects; the public `Layer` returns an `AnalyticKernelFn`.
InternalLayer = Tuple[InitFn, ApplyFn, LayerKernelFn]
InternalLayerMasked = Tuple[InitFn, ApplyFn, LayerKernelFn, MaskFn]
Layer = Tuple[InitFn, ApplyFn, AnalyticKernelFn]
Kernels = Union[List[Kernel], Tuple[Kernel, ...]]
"""Kernel inputs/outputs of `FanOut`, `FanInSum`, etc.
"""
_VMapAxis = Optional[PyTree]
"""A `PyTree` of integers.
"""
VMapAxisTriple = Tuple[_VMapAxis, _VMapAxis, Dict[str, _VMapAxis]]
VMapAxes = Union[_VMapAxis, VMapAxisTriple]
"""Specifies `(input, output, kwargs)` axes for `vmap` in empirical NTK.
"""
| 6,397 | 25.114286 | 103 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/utils/utils.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General-purpose internal utilities.
If a function or class is used in multiple modules, put it here.
"""
from collections import namedtuple
import functools
import inspect
import operator
import types
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Sized, Tuple, Type, TypeVar, Union
import warnings
import jax
from jax import core
from jax import random
import jax.numpy as np
from jax.tree_util import tree_all, tree_map
import numpy as onp
# Lightweight local copies of the common aliases (mirroring `typing.py`):
# any JAX pytree, and an axis spec that is a single int or a sequence of ints.
PyTree = Any
Axes = Union[int, Sequence[int]]
def is_list_or_tuple(x) -> bool:
  """`True` iff `x` is exactly a `list` or a `tuple` (not a subclass)."""
  # The exact type check is deliberate: subclasses such as namedtuples must
  # NOT be treated as internal tree nodes.
  return type(x) in (list, tuple)
def is_nt_tree_of(x, dtype: Union[Type, Tuple[Type, ...]]) -> bool:
  """`True` iff `x` is an NTTree whose leaves are all instances of `dtype`."""
  if isinstance(x, dtype):
    return True
  if is_list_or_tuple(x):
    # Internal node: every child must itself be a valid subtree.
    return all(is_nt_tree_of(child, dtype) for child in x)
  return False
def nt_tree_fn(
    nargs: Optional[int] = None,
    tree_structure_argnum: Optional[int] = None,
    reduce: Callable = lambda x: x
):
  """Convert a function that acts on single inputs to one that acts on trees.
  `nt_tree_fn` treats the first `nargs` arguments as NTTrees and the remaining
  arguments as broadcasted over the tree structure. `nt_tree_fn` then calls the
  function on each leaf of the tree. Each node of the tree optionally calls a
  reduce function over the values of its children.
  If `tree_structure_argnum` is None then each of the NTTrees must have the same
  structure. If `tree_structure_argnum` is an integer then then a specific tree
  is used to infer the structure.
  Args:
    nargs:
      The number of arguments to be treated as NTTrees. If `nargs` is `None`
      then all of the arguments are used. `nargs` can also be negative which
      follows numpy's semantics for array indexing.
    tree_structure_argnum:
      The argument used to infer the tree structure to be traversed. If
      `tree_structure_argnum` is None then a check is performed to ensure that
      all trees have the same structure.
    reduce:
      A callable that is applied recursively by each internal tree node to its
      children.
  Returns:
    A decorator `tree_fn` that transforms a function, `fn`, from acting on
    leaves to acting on NTTrees.
  """
  def check_tree_structure(args):
    """Ensure the structure of the trees in each of the `nargs` is the same."""
    if any(is_list_or_tuple(x) for x in args):
      # All trees must agree on the node type (list vs tuple) at each level.
      if not all(type(x) == type(args[0]) for x in args[1:]):
        raise TypeError(f'Inconsistent NTTree structure found. '
                        f'Node Types: {[type(x) for x in args]}.')
      """
      Regarding the use of zip, consider an example `x1 = x2 = (1, (1, 1))`.
      We would like to determine whether these two trees have the same
      structure.
      On the first recurrence `x1` and `x2` are both tuples so the check
      passes and `zip(*args) = [(1, 1), ((1, 1), (1, 1))]` so that
      `(check_tree_structure(x) for x in zip(x1, x2))` will first check that
      the first element of `x1` has the same tree structure as the first
      element of `x2` and then the second element and so on.
      """
      for x in zip(*args):
        check_tree_structure(x)
  def tree_fn(fn):
    @wraps(fn)
    def wrapped_fn(*args, **kwargs):
      _nargs = len(args) if nargs is None else nargs
      # Split positionals into tree-structured args (recursed into) and args
      # passed through unchanged to every leaf call.
      recurse, norecurse = args[:_nargs], args[_nargs:]
      structure_argnum = tree_structure_argnum
      if structure_argnum is None:
        check_tree_structure(recurse)
        structure_argnum = 0
      if is_list_or_tuple(args[structure_argnum]):
        # Preserve the node type (list vs tuple) of the structure argument,
        # recurse into children in lockstep, then fold with `reduce`.
        list_or_tuple = type(args[structure_argnum])
        return reduce(list_or_tuple(
            wrapped_fn(*(xs + norecurse), **kwargs) for xs in zip(*recurse)))
      return fn(*args, **kwargs)
    return wrapped_fn
  return tree_fn
def all_none(x, attr: Optional[str] = None) -> bool:
  """`True` iff every leaf of pytree `x` (or its attribute `attr`) is `None`."""
  if attr is None:
    extract = lambda leaf: leaf
  else:
    extract = lambda leaf: getattr(leaf, attr)
  return tree_all(tree_map(lambda leaf: extract(leaf) is None, x))
def canonicalize_get(get):
  """Normalize a `get` argument into a tuple of unique lowercase strings.

  Returns:
    `(singleton, get)` where `singleton` is `True` iff the caller passed a
    bare string (or `None`) rather than a tuple of names.
  """
  if get is None:
    return True, None
  if not get:
    # NOTE(schsam): It seems slightly nicer to not support the empty-tuple
    # case. Happy to add support later, if there's a use-case.
    raise ValueError('"get" must be non-empty.')
  singleton = isinstance(get, str)
  items = (get,) if singleton else get
  items = tuple(entry.lower() for entry in items)
  if len(set(items)) != len(items):
    raise ValueError('All entries in "get" must be unique. Got {}'.format(
        items))
  return singleton, items
_KERNEL_NAMED_TUPLE_CACHE: Dict[Any, Any] = {}
def _named_tuple_factory(name, get):
key = (name, get)
if key in _KERNEL_NAMED_TUPLE_CACHE:
return _KERNEL_NAMED_TUPLE_CACHE[key]
else:
_KERNEL_NAMED_TUPLE_CACHE[key] = namedtuple(name, get)
return _named_tuple_factory(name, get)
def _output_to_dict(output):
if isinstance(output, dict):
return output
if hasattr(output, 'asdict'):
return output.asdict()
if hasattr(output, '_asdict'):
return output._asdict()
if isinstance(output, types.GeneratorType):
return (_output_to_dict(out) for out in output)
raise ValueError(type(output))
def wraps(f):
  """Like `functools.wraps(f)`, but also copies `f`'s exact call signature
  onto the wrapper (via `__signature__`)."""
  def decorator(g):
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
      return g(*args, **kwargs)
    wrapped.__signature__ = inspect.signature(f)
    return wrapped
  return decorator
def get_namedtuple(name):
  """Decorator factory: canonicalize a `get` argument and pack dict outputs.

  The decorated function must take a `get` argument (a kernel name, tuple of
  names, or `None`). Its dict-like outputs are converted to a namedtuple
  called `name` with fields `get`; a single-string `get` returns the bare
  value instead. NTTree-structured and generator outputs are handled too.
  """
  def getter_decorator(fn):
    try:
      argspec = inspect.getfullargspec(fn)
      get_index = argspec.args.index('get')
      defaults = argspec.defaults
    # Fixed: was a bare `except:`, which would also swallow
    # `KeyboardInterrupt`/`SystemExit`. `getfullargspec` raises `TypeError`
    # for unsupported callables, and `.index` raises `ValueError` when there
    # is no `get` argument.
    except (TypeError, ValueError):
      raise ValueError('`get_namedtuple` functions must have a `get` argument.')
    @wraps(fn)
    def getter_fn(*args, **kwargs):
      canonicalized_args = list(args)
      # Locate `get` among kwargs, positionals, or defaults — in that order.
      if 'get' in kwargs:
        get_is_not_tuple, get = canonicalize_get(kwargs['get'])
        kwargs['get'] = get
      elif get_index < len(args):
        get_is_not_tuple, get = canonicalize_get(args[get_index])
        canonicalized_args[get_index] = get
      elif defaults is None:
        raise ValueError(
            '`get_namedtuple` function must have a `get` argument provided or '
            'set by default.')
      else:
        get_is_not_tuple, get = canonicalize_get(defaults[get_index -
                                                          len(args)])
      fn_out = fn(*canonicalized_args, **kwargs)
      @nt_tree_fn()
      def canonicalize_output(out):
        if get is None:
          # `get=None`: return everything; dicts become namedtuples keyed by
          # whatever fields the output happens to have.
          if isinstance(out, dict):
            ReturnType = _named_tuple_factory(name, tuple(out.keys()))
            out = ReturnType(*out.values())
          return out
        out = _output_to_dict(out)
        if get_is_not_tuple:
          # Single requested field: return the bare value (or a generator of
          # bare values).
          if isinstance(out, types.GeneratorType):
            return (output[get[0]] for output in out)
          else:
            return out[get[0]]
        ReturnType = _named_tuple_factory(name, get)
        if isinstance(out, types.GeneratorType):
          return (ReturnType(*tuple(output[g] for g in get)) for output in out)
        else:
          return ReturnType(*tuple(out[g] for g in get))
      return canonicalize_output(fn_out)
    return getter_fn
  return getter_decorator
@nt_tree_fn(nargs=2, reduce=lambda x: np.all(np.array(x)))
def x1_is_x2(x1: np.ndarray,
             x2: Optional[np.ndarray] = None,
             eps: float = 1e-12) -> Union[bool, np.ndarray]:
  """`True` if `x2` is `None`, is `x1` itself, or matches `x1` elementwise
  within tolerance `eps`."""
  if not isinstance(x1, (onp.ndarray, np.ndarray)):
    raise TypeError('`x1` must be an ndarray. A {} is found.'.format(type(x1)))
  if x2 is None or x1 is x2:
    return True
  if x1.shape != x2.shape:
    return False
  # Looser tolerance on TPU — preserves the original behavior; presumably
  # motivated by lower numerical precision on that backend.
  tol = 1e-4 if jax.default_backend() == 'tpu' else eps
  return np.all(np.abs(x1 - x2) < tol)
def _get_ndim(x: Union[int, Sized, np.ndarray]) -> int:
"""Get number of dimensions given number of dimensions / shape / array."""
if hasattr(x, 'ndim'):
n = x.ndim
elif hasattr(x, '__len__'):
n = len(x)
elif isinstance(x, int):
n = x
else:
raise TypeError(x, type(x))
return n
def mod(axis: Axes, x: Union[int, Sized, np.ndarray]) -> List[int]:
  """Makes `axis` non-negative given number of dimensions / shape / array."""
  n = _get_ndim(x)
  axes = [axis] if isinstance(axis, int) else axis
  # With zero dimensions there is nothing to wrap around, so pass through.
  return [(a % n) if n > 0 else a for a in axes]
def canonicalize_axis(axis: Axes,
                      x: Union[int, Sized, np.ndarray]) -> List[int]:
  """Converts axis into a sorted non-negative list.
  Args:
    axis: input axis.
    x: array / shape / number of dimensions.
  Returns:
    A sorted list of integer axes.
  """
  axes = [axis] if isinstance(axis, int) else list(axis)
  n = _get_ndim(x)
  # Indexing `arange(n)` wraps negative axes; `set` removes duplicates.
  return list(set(onp.arange(n)[axes]))
def zip_axes(x: np.ndarray,
             start_axis: int = 0,
             end_axis: Optional[int] = None) -> np.ndarray:
  """Zip (interleave) axes in `[start_axis, end_axis)`.
  Changes the shape as follows:
  `[..., X, Y, Z, ..., X, Y, Z, ...] -> [..., X, X, ..., Y, Y, ..., Z, Z, ...]`
  Args:
    x: `np.ndarray` with an even number of dimensions following `start_axis`.
    start_axis: `int`, number of axis from which to zip (interleave).
    end_axis: `int`, number of axis until which to zip (interleave).
  Returns:
    A `np.ndarray` with a new shape.
  """
  return _zip_axes(x, start_axis, end_axis, False)
def unzip_axes(x: np.ndarray,
               start_axis: int = 0,
               end_axis: Optional[int] = None) -> np.ndarray:
  """Unzip (de-interleave) axes in `[start_axis, end_axis)`.
  Changes the shape as follows:
  `[..., X, X, ..., Y, Y, ..., Z, Z, ...] -> [..., X, Y, Z, ..., X, Y, Z, ...]`
  Args:
    x: `np.ndarray` with an even number of dimensions following `start_axis`.
    start_axis: `int`, number of axis from which to unzip (de-interleave).
    end_axis: `int`, number of axis until which to unzip (de-interleave).
  Returns:
    A `np.ndarray` with a new shape.
  """
  return _zip_axes(x, start_axis, end_axis, True)
def _zip_axes(x: np.ndarray,
start_axis: int = 0,
end_axis: Optional[int] = None,
unzip: bool = False) -> np.ndarray:
"""Zip/unzip (interleave/de-interleave) axes starting from `start_axis`.
Changes the shape as follows:
If `unzip == True`:
`[..., X, X, ..., Y, Y, ..., Z, Z, ...] -> [..., X, Y, Z, ..., X, Y, Z, ..]`
If `unzip == False`:
`[..., X, Y, Z, ..., X, Y, Z, ...] -> [..., X, X, ..., Y, Y, ..., Z, Z, ..]`
Args:
x: `np.ndarray` with an even number of dimensions following `start_axis`.
start_axis: `int`, number of axis from which to zip/unzip.
end_axis: `int`, number of axis until which to zip/unzip.
unzip: `bool`, set to `True` to unzip instead of zip.
Returns:
A `np.ndarray` with a new shape.
"""
if end_axis is None:
end_axis = x.ndim
half_ndim, ragged = divmod(end_axis - start_axis, 2)
if ragged:
raise ValueError(
f'Need even number of axes to zip, got {end_axis - start_axis}.')
odd_axes = range(start_axis + 1, end_axis, 2)
last_axes = range(end_axis - half_ndim, end_axis)
if unzip:
x = np.moveaxis(x, odd_axes, last_axes)
else:
x = np.moveaxis(x, last_axes, odd_axes)
return x
def diagonal_between(x: np.ndarray,
                     start_axis: int = 0,
                     end_axis: Optional[int] = None) -> np.ndarray:
  """Returns the diagonal along all dimensions between start and end axes."""
  if end_axis is None:
    end_axis = x.ndim
  n_axes = end_axis - start_axis
  half, ragged = divmod(n_axes, 2)
  if ragged:
    raise ValueError(f'Need even number of axes to flatten, got {n_axes}.')
  if half == 0:
    # Nothing to take the diagonal over.
    return x
  block_shape = x.shape[start_axis:start_axis + half]
  block_size = functools.reduce(operator.mul, block_shape, 1)
  # View the `2 * half` axes as one (block_size x block_size) matrix, take
  # its diagonal, move it back into place, and restore the block shape.
  as_matrix = x.reshape(
      x.shape[:start_axis] + (block_size, block_size) + x.shape[end_axis:])
  diag = np.diagonal(as_matrix, axis1=start_axis, axis2=start_axis + 1)
  diag = np.moveaxis(diag, -1, start_axis)
  return diag.reshape(x.shape[:start_axis] + block_shape + x.shape[end_axis:])
def zip_flat(x, y):
  """Interleave two sequences into one tuple: `(x0, y0, x1, y1, ...)`."""
  out = []
  for a, b in zip(x, y):
    out.append(a)
    out.append(b)
  return tuple(out)
def interleave_ones(x, start_axis, end_axis, x_first):
  """Insert a size-1 axis next to each of `x`'s axes in `[start_axis, end_axis)`.

  If `x_first`, ones follow each axis (`X -> X, 1`); otherwise they precede
  it (`X -> 1, X`). Axes outside the range are untouched.
  """
  dims = x.shape[start_axis:end_axis]
  ones = (1,) * (end_axis - start_axis)
  pairs = zip(dims, ones) if x_first else zip(ones, dims)
  middle = tuple(d for pair in pairs for d in pair)
  return x.reshape(x.shape[:start_axis] + middle + x.shape[end_axis:])
def outer_prod(x, y, start_axis, end_axis, prod_op):
  """Outer `prod_op` of `x` and `y` over the axes in `[start_axis, end_axis)`.

  If `y` is `None`, computes the outer product of `x` with itself.
  """
  if y is None:
    y = x
  lhs = interleave_ones(x, start_axis, end_axis, True)
  rhs = interleave_ones(y, start_axis, end_axis, False)
  # Broadcasting over the inserted size-1 axes realizes the outer product.
  return prod_op(lhs, rhs)
# Anything the shape utilities below accept as a "shape carrier": an actual
# (numpy or jax.numpy) array, or a list/tuple of ints.
_ArrayOrShape = TypeVar('_ArrayOrShape',
                        onp.ndarray,
                        np.ndarray,
                        List[int],
                        Tuple[int, ...])
def reverse_zipped(
    x: _ArrayOrShape,
    start_axis: int = 0
) -> _ArrayOrShape:
  """Reverses the order of `(2i, 2i + 1)` axis pairs after `start_axis`.

  Works on arrays (via `moveaxis`) as well as on plain shape sequences, and
  passes `None` through unchanged.
  """
  if x is not None:
    ndim = _get_ndim(x)
    # Enumerate pairs from the last one backwards, keeping in-pair order.
    src = []
    for pair_start in range(ndim - 2, start_axis - 1, -2):
      src.extend((pair_start, pair_start + 1))
    src = tuple(src)
    if isinstance(x, (onp.ndarray, np.ndarray)):
      x = np.moveaxis(x, src, range(start_axis, ndim))
    else:
      x = x[:start_axis] + type(x)(x[i] for i in src)
  return x
def mask(
x: Optional[np.ndarray],
mask_mat: Optional[np.ndarray]
) -> Optional[np.ndarray]:
if x is None or mask_mat is None:
return x
return np.where(mask_mat, np.zeros((), x.dtype), x)
def size_at(
    x: Union[_ArrayOrShape, core.ShapedArray],
    axes: Optional[Iterable[int]] = None
) -> int:
  """Returns the total size of `x` along `axes` (all axes if `None`).

  `x` may be anything with a `.shape` attribute, or a shape sequence itself.
  """
  shape = x.shape if hasattr(x, 'shape') else x
  if axes is None:
    axes = range(len(shape))
  size = 1
  for a in axes:
    size *= shape[a]
  return size
def axis_after_dot(
    axis: int,
    contracting_dims: Sequence[int],
    batch_dims: Sequence[int],
    lhs_ndim: Optional[int] = None
) -> int:
  """Maps an operand `axis` to its position in a `dot_general`-style output.

  Batch axes come first in the output; contracted axes disappear. When
  `lhs_ndim` is given, `axis` belongs to the `rhs` operand and is shifted
  past the surviving (non-batch, non-contracting) axes of `lhs`.
  """
  if axis in batch_dims:
    return batch_dims.index(axis)

  removed_before = sum(1 for c in contracting_dims if c < axis)
  batch_after = sum(1 for b in batch_dims if b > axis)
  if lhs_ndim is None:
    lhs_offset = 0
  else:
    lhs_offset = lhs_ndim - len(batch_dims) - len(contracting_dims)
  return axis - removed_before + batch_after + lhs_offset
def make_2d(
    x: Optional[np.ndarray],
    start_axis: int = 0,
    end_axis: Optional[int] = None
) -> Optional[np.ndarray]:
  """Makes `x` 2D from `start_axis` to `end_axis`, preserving other axes.

  `x` is assumed to follow the (`X, X, Y, Y, Z, Z`) axes layout.

  Example:
    >>> x = np.ones((1, 2, 3, 3, 4, 4))
    >>> make_2d(x).shape == (12, 24)
    >>> #
    >>> make_2d(x, 2).shape == (1, 2, 12, 12)
    >>> #
    >>> make_2d(x, 2, 4).shape == (1, 2, 3, 3, 4, 4)
  """
  if x is None:
    return None

  if end_axis is None:
    end_axis = x.ndim

  # Separate the interleaved halves, then flatten each half into one axis.
  x = unzip_axes(x, start_axis, end_axis)
  half = (end_axis - start_axis) // 2
  rows = size_at(x.shape[start_axis:start_axis + half])
  cols = size_at(x.shape[start_axis + half:end_axis])
  return x.reshape(x.shape[:start_axis] + (rows, cols) + x.shape[end_axis:])
def _read_keys(key, x1, x2):
  """Read dropout key.

  `key` might be a tuple of two rng keys or a single rng key or None. In
  either case, `key` will be mapped into two rng keys `key1` and `key2` to
  make sure `(x1==x2) == (key1==key2)`.
  """
  if key is None or all_none(x2):
    # No second input: both branches share the same (possibly absent) key.
    key1 = key2 = key
  elif isinstance(key, tuple) and len(key) == 2:
    key1, key2 = key
    # If the two supplied keys coincide, derive a distinct second key; it is
    # then only used when the inputs actually differ.
    new_key = np.where(x1_is_x2(key1, key2),
                       random.fold_in(key2, 1), key2)
    key2 = np.where(x1_is_x2(x1, x2), key1, new_key)
    warnings.warn('The value of `key[1]` might be replaced by a new value if '
                  'key[0] == key[1] and x1 != x2 or key[0] != key[1] and '
                  'x1 == x2.')
  elif isinstance(key, np.ndarray):
    # Single key: reuse it for identical inputs, fold in a constant otherwise.
    key1 = key
    key2 = np.where(x1_is_x2(x1, x2), key1, random.fold_in(key, 1))
  else:
    raise TypeError(type(key))
  return key1, key2
def split_kwargs(kwargs, x1=None, x2=None):
  """Splitting `kwargs`.

  Specifically,
    1. if kwarg is an rng key, it will be split into two keys.
    2. else if it is a tuple of length two, the tuple will be split into two
       parts, one for kwargs1 and the other for kwargs2.
    3. else it is copied to kwargs1 and kwargs2.
  """
  kwargs1, kwargs2 = {}, {}
  for name, value in kwargs.items():
    if name == 'rng' and x2 is not None:
      kwargs1[name], kwargs2[name] = _read_keys(value, x1, x2)
    elif isinstance(value, tuple) and len(value) == 2:
      kwargs1[name], kwargs2[name] = value
    else:
      kwargs1[name] = kwargs2[name] = value
  return kwargs1, kwargs2
# One indexing entry: an integer, a `slice`, or `Ellipsis`.
_SingleSlice = Union[int, slice, type(Ellipsis)]
SliceType = Union[_SingleSlice, Tuple[_SingleSlice, ...]]
"""A type to specify a slice of an array.
For instance, when indexing `x[1, :, 2:8:3]` a slice tuple
`(1, slice(None), slice(2, 8, 3))` is created. But since slice functions cannot
accept slice specifications like `1, :, 2:8:3` as arguments, you must either
pass this object, or, for convenience, an :cls:`~neural_tangents.stax.Slice`
slice, such as `nt.stax.Slice[1, :, 2:8:3]`.
"""
def canonicalize_idx(
    idx: SliceType,
    ndim: int
) -> Tuple[Union[int, slice], ...]:
  """Expands `idx` to a length-`ndim` tuple of ints / slices.

  A scalar entry or single slice is promoted to a tuple, `Ellipsis` is
  replaced by the appropriate number of `slice(None)`, and the result is
  right-padded with `slice(None)` up to `ndim` entries.
  """
  if isinstance(idx, (int, slice)) or idx is Ellipsis:
    idx = (idx,) + (slice(None),) * (ndim - 1)

  for pos, entry in enumerate(idx):
    if entry is Ellipsis:
      fill = (slice(None),) * (ndim - len(idx) + 1)
      idx = idx[:pos] + fill + idx[pos + 1:]

  return idx + (slice(None),) * (ndim - len(idx))
def slice_shape(shape: Tuple[int, ...], idx: SliceType) -> Tuple[int, ...]:
  """Returns the shape of `x[idx]` for an `x` of shape `shape`.

  Axes of unknown size (`None` or negative) are kept as-is, provided they are
  not indexed into (i.e. their index entry is `:`).

  Args:
    shape: input shape; entries may be `None` or negative for unknown sizes.
    idx: a slice specification (see `SliceType`).

  Returns:
    The shape of the sliced array, with unknown sizes preserved.

  Raises:
    ValueError: if an axis of unknown size is indexed with anything but `:`.
  """
  canonical_idx = canonicalize_idx(idx, len(shape))
  np_shape = list(shape)
  unknown_axes = {}
  n_ints = 0  # Keep track of vanishing axes due to integer indexing.
  for a, (i, s) in enumerate(zip(canonical_idx, shape)):
    # Bug fix: check `s is None` first — `None < 0` raises a `TypeError` in
    # Python 3, which previously broke the documented `None`-axis support.
    if s is None or s < 0:
      if i == slice(None):
        np_shape[a] = 0
        unknown_axes[a - n_ints] = s
      else:
        raise ValueError(
            f'Trying to index with {i} axis {a} of unknown size {s}. '
            f'Please provide input shape {shape} with non-negative integer '
            f'size at axis {a}.')
    if isinstance(i, int):
      n_ints += 1
  # Let NumPy compute the indexed shape of a zero-size placeholder, then put
  # the unknown sizes back at their (integer-index-shifted) positions.
  out_shape = list(onp.empty(np_shape)[idx].shape)
  for a, v in unknown_axes.items():
    out_shape[a] = v
  return tuple(out_shape)
_T = TypeVar('_T')


def double_tuple(x: Iterable[_T]) -> Tuple[_T, ...]:
  """Repeats every element of `x` twice: `(a, b) -> (a, a, b, b)`."""
  out = []
  for v in x:
    out.append(v)
    out.append(v)
  return tuple(out)
| 19,310 | 28.618098 | 110 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/utils/rules.py | """Structured derivatives rules."""
from .dataclasses import dataclass, field
import functools
from typing import Callable, Optional, Tuple, Dict, List, Union, Any
from . import utils
import jax
from jax import lax
from jax.core import JaxprEqn, ShapedArray, Primitive, Jaxpr, Var, AbstractValue, Literal
from jax.interpreters import ad
import jax.numpy as np
import numpy as onp
# pytype: disable=wrong-keyword-args
# pytype: disable=wrong-keyword-args
@dataclass
class Structure:
  """Describes structure present in a primitive derivative dy/dw.

  # TODO(romann): make this a python dataclass.

  Attributes:
    out_trace:
      axes of the primitive `y` output along which the primitive Jacobian
      `dy/dw` is constant-block diagonal along the respective axes in the input
      `in_trace`.
    in_trace:
      axes of the primitive `y` inputs along which the primitive Jacobian
      `dy/dw` is constant-block diagonal along the respective axes in the output
      `out_trace`.
    in_trace_idxs:
      indices of input variables to which `in_trace` axes are applied. Other
      variables are considered untouched.
    out_diagonal:
      axes of the primitive `y` output along which the primitive Jacobian
      `dy/dw` is (not constant) block diagonal along the respective axes in the
      input `in_diagonal`.
    in_diagonal:
      axes of the primitive `y` inputs along which the primitive Jacobian
      `dy/dw` is (not constant) block diagonal along the respective axes in the
      output `out_diagonal`. Each entry in the `in_diagonal` tuple is a tuple of
      length equal to the number of input variables; each entry in the tuple is
      either an integer axis number correspomnding to the respective input
      variable, or `None`, meaning that the respective variable is considered
      untouched.
    out_broadcast:
      axes of the primitive `y` output along which the primitive Jacobian
      `dy/dw` is block-tiled.
    out_broadcast_idxs:
      indices of input variables that need to be squeezed along the
      `out_broadcast` axes in order for the primitive `y` to return the slice
      that is being tiled along `out_broadcast` in the full output.
    in_broadcast:
      axes of the primitive `y` inputs along which the primitive Jacobian
      `dy/dw` is block-tiled.
    in_broadcast_idx:
      indices of input variables that need to be squeezed along the
      `in_broadcast` axes in order for the primitive Jacobian `dy/dw` to return
      the slice that is being tiled along `in_broadcast` in the full output.
  """
  out_trace: Tuple[int, ...] = field(False, default_factory=tuple)
  in_trace: Tuple[int, ...] = field(False, default_factory=tuple)
  in_trace_idxs: Tuple[int, ...] = field(False, default_factory=tuple)
  out_diagonal: Tuple[int, ...] = field(False, default_factory=tuple)
  in_diagonal: Tuple[Tuple[Optional[int], ...], ...] = field(
      False, default_factory=tuple)
  out_broadcast: Tuple[int, ...] = field(False, default_factory=tuple)
  out_broadcast_idxs: Tuple[int, ...] = field(False, default_factory=tuple)
  in_broadcast: Tuple[int, ...] = field(False, default_factory=tuple)
  in_broadcast_idx: int = field(False, default_factory=int)

  def __and__(self, other):
    """Defines interaction with structure of the other primitive dy2/dw."""
    assert len(self.in_trace) == len(self.out_trace), (self, other)
    assert len(other.in_trace) == len(other.out_trace), (self, other)

    in_trace_idxs = self.in_trace_idxs
    # Keep only trace axes that both structures agree on.
    in_trace = tuple(i for i in self.in_trace if i in other.in_trace)
    out_trace = tuple(self.out_trace[i] for i in range(len(self.out_trace))
                      if self.in_trace[i] in other.in_trace
                      )
    assert len(in_trace) == len(out_trace), (self, other)

    # Diagonal axes survive only if present in both structures; the matching
    # `in_diagonal` entries are filtered by their `out_diagonal` position.
    out_diagonal = tuple(i for i in self.out_diagonal
                         if i in other.out_diagonal)
    in_diagonal = tuple(i for ix, i in enumerate(self.in_diagonal)
                        if self.out_diagonal[ix] in other.out_diagonal)

    out_broadcast = tuple(i for i in self.out_broadcast
                          if i in other.out_broadcast)
    # NOTE(review): `in_broadcast` is derived from the `out_broadcast` fields
    # here, identical to the line above — confirm this is intentional (vs.
    # intersecting `self.in_broadcast` with `other.in_broadcast`).
    in_broadcast = tuple(i for i in self.out_broadcast
                         if i in other.out_broadcast)

    return Structure(
        out_trace=out_trace,
        in_trace=in_trace,
        in_trace_idxs=in_trace_idxs,
        out_diagonal=out_diagonal,
        in_diagonal=in_diagonal,
        out_broadcast=out_broadcast,
        out_broadcast_idxs=self.out_broadcast_idxs,
        in_broadcast=in_broadcast,
        in_broadcast_idx=self.in_broadcast_idx,
    )
# Registries mapping a JAX primitive (`None` stands for the identity
# function) to its rules:
#   - STRUCTURE_RULES: returns the `Structure` of the primitive's Jacobian.
#   - JACOBIAN_RULES: materializes the dense primitive Jacobian `dy/dw`.
#   - EQN_PARAMS_RULES: adjusts equation params to match trimmed
#     inputs/cotangents (see e.g. `_broadcast_in_dim_e`).
STRUCTURE_RULES: Dict[Optional[Primitive], Callable[..., Structure]] = {}
JACOBIAN_RULES: Dict[Optional[Primitive], Callable[..., np.ndarray]] = {}
EQN_PARAMS_RULES: Dict[Optional[Primitive], Callable[..., Dict[str, Any]]] = {}
def get_structure(
    eqn: Optional[JaxprEqn],
    invals: List[Union[ShapedArray, AbstractValue]],
    idx: int,
    _s_rules: bool
) -> Structure:
  """Returns the `Structure` of `d(eqn output) / d(eqn.invars[idx])`.

  Args:
    eqn: the equation to analyze, or `None` for the identity function.
    invals: abstract values of the equation inputs.
    idx: index of the input variable to differentiate with respect to.
    _s_rules: whether to use structure simplification rules.

  Returns:
    The (possibly trivial) `Structure` of the primitive Jacobian, after
    validating its internal consistency with assertions.
  """
  # NOTE(review): this compares against the class object itself (identity),
  # not `isinstance` — presumably guarding against placeholder avals; confirm.
  if any(i is AbstractValue for i in invals):
    raise TypeError(invals)

  if eqn is None:
    # Identity function
    primitive = None
    cts_in = invals[0]
    assert idx == 0

  else:
    if len(eqn.outvars) != 1:
      raise NotImplementedError(eqn)

    cts_in = eqn.outvars[0].aval
    primitive = eqn.primitive
    assert len(invals) == len(eqn.invars)
    assert 0 <= idx < len(eqn.invars)

  if not isinstance(cts_in, ShapedArray):
    raise TypeError(cts_in)

  if primitive in STRUCTURE_RULES and _s_rules:
    structure = STRUCTURE_RULES[primitive](eqn, idx, invals, cts_in)
  else:
    # No simplification rule found.
    structure = Structure()

  # TODO(romann): can we avoid special-casing `reshape`s?
  if primitive == lax.reshape_p:
    cts_in = ShapedArray(invals[idx].shape, invals[idx].dtype)

  # Check that number of trace output and input axes match.
  assert len(structure.in_trace) == len(structure.out_trace)

  # Check that input and output traced sizes are the same.
  out_trace_size = utils.size_at(cts_in, structure.out_trace)
  in_trace_size = utils.size_at(invals[idx], structure.in_trace)
  assert in_trace_size == out_trace_size

  # Check that number of input/output diagonal axes match.
  assert len(structure.out_diagonal) == len(structure.in_diagonal)

  # Check for each output diagonal axis there's only input axes of correct
  # size or `None`. Inval axis should be not `None`.
  for out_d, in_d in zip(structure.out_diagonal, structure.in_diagonal):
    assert len(in_d) == len(invals)
    assert in_d[idx] is not None
    for ix, i in enumerate(in_d):
      if i is not None:
        assert invals[ix].shape[i] == cts_in.shape[out_d]

  return structure
def get_structure_cache(
    jaxpr: Jaxpr,
    _s_rules: bool
) -> Dict[Var, Structure]:
  """Associates a least common structure to each input variable of the `jaxpr`.

  Args:
    jaxpr: Jaxpr to build cache for.
    _s_rules: whether to use structure rules or not.

  Returns:
    A dictionary mapping input variables to the least common structure of all
    primitives it is present in as a direct input.
  """
  invar_to_structure: Dict[Var, Structure] = {}

  # Inputs that are passed straight through to the outputs behave as the
  # identity function.
  for var in jaxpr.invars:
    if var in jaxpr.outvars:
      if isinstance(var, Literal):
        raise TypeError(var)

      # Identity function
      structure = get_id_structure(var.aval, _s_rules)

      if var in invar_to_structure:
        invar_to_structure[var] &= structure
      else:
        invar_to_structure[var] = structure

  # Intersect (via `&`) the structures of every equation each input feeds.
  for eqn in jaxpr.eqns:
    for i_eqn, var in enumerate(eqn.invars):
      if var in jaxpr.invars:
        if isinstance(var, Literal):
          raise TypeError(var)

        structure = get_structure(
            eqn=eqn,
            invals=[v.aval for v in eqn.invars],
            idx=i_eqn,
            _s_rules=_s_rules
        )

        if var in invar_to_structure:
          invar_to_structure[var] &= structure
        else:
          invar_to_structure[var] = structure

  return invar_to_structure
def get_id_structure(
    inval: AbstractValue,
    _s_rules: bool
) -> Structure:
  """Returns the structure of the identity function applied to `inval`."""
  if not isinstance(inval, ShapedArray):
    raise TypeError(inval)
  # `eqn=None` denotes the identity function; it has one input, at `idx=0`.
  return get_structure(None, [inval], 0, _s_rules)
# UTILS
def _eye_like(out_shaped: ShapedArray, in_shaped: ShapedArray) -> np.ndarray:
assert out_shaped.size == in_shaped.size, (out_shaped, in_shaped)
eye = np.eye(out_shaped.size, dtype=out_shaped.dtype)
eye = eye.reshape(out_shaped.shape + in_shaped.shape) # pytype: disable=unsupported-operands # always-use-return-annotations
return eye
# BINARY PRIMITIVES


def _dot_general_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.dot_general`.

  Non-contracting, non-batch axes of the differentiated operand are trace
  axes; batch axes are diagonal between inputs and output.
  """
  contracting_dims, batch_dims = eqn.params['dimension_numbers']

  self, other = invals[idx], invals[1 if idx == 0 else 0]

  self_c_dims = contracting_dims[idx]
  self_b_dims = batch_dims[idx]

  in_trace = tuple(i for i in range(self.ndim) if
                   (i not in self_c_dims) and (i not in self_b_dims))
  # Map each surviving input axis to its position in the `dot_general` output.
  out_trace = tuple(
      utils.axis_after_dot(i, self_c_dims, self_b_dims,
                           lhs_ndim=None if idx == 0 else other.ndim)
      for i in in_trace
  )

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      in_diagonal=tuple(zip(*batch_dims)),
      out_diagonal=tuple(range(len(self_b_dims))),
  )


def _dot_general_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.dot_general` w.r.t. operand `idx`."""
  contracting_dims, batch_dims = eqn.params['dimension_numbers']

  lhs_c_dims, rhs_c_dims = contracting_dims
  lhs_b_dims, rhs_b_dims = batch_dims

  lhs, rhs = invals

  if idx == 0:
    self = lhs
    self_c_dims, self_b_dims = lhs_c_dims, lhs_b_dims

    other = rhs
    other_c_dims, other_b_dims = rhs_c_dims, rhs_b_dims

  else:
    self = rhs
    self_c_dims, self_b_dims = rhs_c_dims, rhs_b_dims

    other = lhs
    other_c_dims, other_b_dims = lhs_c_dims, lhs_b_dims

  self_ncb_dims = tuple(i for i in range(self.ndim)
                        if i not in self_c_dims + self_b_dims)
  self_nc_dims = tuple(i for i in range(self.ndim)
                       if i not in self_c_dims)

  # The Jacobian is the *other* operand, with batch axes first and
  # contracting axes moved to the back (ordered to match `self`'s).
  j = np.moveaxis(
      other,
      other_b_dims + tuple(d[1]
                           for d in sorted(zip(self_c_dims, other_c_dims))),
      tuple(range(len(other_b_dims))) + tuple(range(-len(other_c_dims), 0))
  )

  self_ncb_out = tuple(utils.axis_after_dot(
      i,
      self_c_dims,
      self_b_dims,
      other.ndim if idx == 1 else None
  ) for i in self_ncb_dims)

  self_nc_in = tuple(cts_in.ndim + i for i in self_nc_dims)

  j = np.expand_dims(j, self_ncb_out + self_nc_in)

  self_ncb_size = utils.size_at(self, self_ncb_dims)
  self_ncb_in = tuple(i + cts_in.ndim for i in self_ncb_dims)

  # Mask out off-diagonal entries along the surviving (non-contracting,
  # non-batch) axes of `self`.
  shape = [1 for _ in range(j.ndim)]
  for i_out, i_in in zip(self_ncb_out, self_ncb_in):
    shape[i_out] = shape[i_in] = self.shape[i_in - cts_in.ndim]

  eye = np.eye(self_ncb_size, dtype=np.bool_)
  eye = eye.reshape(shape)
  j = np.where(eye, j, np.zeros((), j.dtype))

  # Mask out off-diagonal entries along each batch axis.
  for out_b, (self_b, other_b) in enumerate(zip(self_b_dims, other_b_dims)):
    b_size = other.shape[other_b]
    eye = np.eye(b_size, dtype=np.bool_)
    shape = [1 for _ in range(j.ndim)]
    shape[out_b] = shape[cts_in.ndim + self_b] = b_size
    eye = eye.reshape(shape)
    j = np.where(eye, j, np.zeros((), j.dtype))

  return j


STRUCTURE_RULES[lax.dot_general_p] = _dot_general_s
JACOBIAN_RULES[lax.dot_general_p] = _dot_general_j
def _conv_general_dilated_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.conv_general_dilated` (w.r.t. the kernel only)."""
  if idx != 1:
    raise NotImplementedError(eqn, idx)

  lhs_spec, rhs_spec, out_spec = eqn.params['dimension_numbers']
  batch_group_count = eqn.params['batch_group_count']
  feature_group_count = eqn.params['feature_group_count']

  lhs, rhs = invals

  # Depthwise convolution: input features are diagonal with output features.
  if (rhs.shape[rhs_spec[0]] == feature_group_count and
      rhs.shape[rhs_spec[1]] == 1):
    assert lhs.shape[lhs_spec[1]] == feature_group_count
    return Structure(
        in_trace=(),
        in_trace_idxs=(),
        out_trace=(),
        in_diagonal=((lhs_spec[1], rhs_spec[0]),),
        out_diagonal=(out_spec[1],)
    )

  # Batch-grouped convolution: batch axis diagonal with output features.
  elif (lhs.shape[lhs_spec[0]] == batch_group_count and
        rhs.shape[rhs_spec[0]] == batch_group_count):
    return Structure(
        in_trace=(),
        in_trace_idxs=(),
        out_trace=(),
        in_diagonal=((lhs_spec[0], rhs_spec[0]),),
        out_diagonal=(out_spec[1],)
    )

  # Ungrouped convolution: output-feature axis of the kernel is a trace axis.
  elif batch_group_count == feature_group_count == 1:
    return Structure(
        in_trace=(rhs_spec[0],),
        in_trace_idxs=(idx,),
        out_trace=(out_spec[1],),
        out_diagonal=(),
        in_diagonal=()
    )

  return Structure()


def _conv_general_dilated_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.conv_general_dilated` w.r.t. the kernel.

  Built from `conv_general_dilated_patches`, which extracts the input patches
  each kernel entry multiplies.
  """
  if idx != 1:
    raise NotImplementedError(eqn, idx)

  lhs = invals[1 if idx == 0 else 0]
  rhs = invals[idx]

  ndim = cts_in.ndim
  lhs_spec, rhs_spec, out_spec = eqn.params['dimension_numbers']
  precision = eqn.params['precision']

  n_groups_f = eqn.params['feature_group_count']
  n_groups_b = eqn.params['batch_group_count']

  n_channels_in = lhs.shape[lhs_spec[1]]
  n_batch_in = lhs.shape[lhs_spec[0]]

  group_size_out = rhs.shape[rhs_spec[0]] // (n_groups_f * n_groups_b)
  group_size_in = n_channels_in // n_groups_f
  batch_size_in = n_batch_in // n_groups_b

  if isinstance(precision, tuple):
    if precision[0] == precision[1]:
      precision = precision[0]
    else:
      raise NotImplementedError(precision)

  filter_shape = tuple(rhs.shape[i] for i in range(ndim) if i in rhs_spec[2:])

  j = lax.conv_general_dilated_patches(
      lhs=lhs,
      filter_shape=filter_shape,
      window_strides=eqn.params['window_strides'],
      padding=eqn.params['padding'],
      lhs_dilation=eqn.params['lhs_dilation'],
      rhs_dilation=eqn.params['rhs_dilation'],
      dimension_numbers=eqn.params['dimension_numbers'],
      precision=precision,
      preferred_element_type=eqn.params['preferred_element_type']
  )

  if n_groups_b > 1:
    j = np.moveaxis(j, (out_spec[0], out_spec[1]), (-1, -2))
    j = j.reshape(j.shape[:-2] +
                  (n_channels_in, *filter_shape, n_groups_b, batch_size_in))
    j = np.moveaxis(j, (-1, -2), (-2, -1))

  else:
    j = np.moveaxis(j, out_spec[1], -1)
    rhs_shape = (n_groups_f, group_size_in) + filter_shape
    j = j.reshape(j.shape[:ndim - 1] + rhs_shape)
    j = np.moveaxis(j, (ndim - 1, ndim), (-1, -2))

  # Expand the group axis into a (group, group) diagonal.
  j = np.vectorize(np.diag, signature='(k)->(k,k)')(j)

  if n_groups_b > 1:
    j = np.moveaxis(
        j,
        tuple(range(ndim - 2, j.ndim)),
        [ndim + rhs_spec[1]] +
        [ndim + i for i in sorted(rhs_spec[2:])] +
        [out_spec[0], out_spec[1], ndim + rhs_spec[0]]
    )
  else:
    j = np.moveaxis(
        j,
        tuple(range(ndim - 1, j.ndim)),
        [ndim + i for i in sorted(rhs_spec[2:])] +
        [ndim + rhs_spec[1], out_spec[1], ndim + rhs_spec[0]]
    )

  # Tile over the `group_size_out` output channels within each group.
  eye = np.eye(group_size_out, dtype=lhs.dtype)
  eye = np.expand_dims(
      eye,
      [i for i in range(j.ndim) if i not in (out_spec[1], ndim + rhs_spec[0])]
  )
  j = np.kron(j, eye)
  return j


def _conv_general_dilated_e(
    params: Dict[str, Any],
    idx: int,
    trimmed_invals: List[ShapedArray],
    trimmed_cts_in: ShapedArray
) -> Dict[str, Any]:
  """Resets group counts that no longer divide the trimmed input shapes."""
  lhs, rhs = trimmed_invals
  dn = params['dimension_numbers']

  if (params['feature_group_count'] > lhs.shape[dn[0][1]] or
      params['feature_group_count'] > rhs.shape[dn[1][0]]):
    params['feature_group_count'] = 1

  if (params['batch_group_count'] > rhs.shape[dn[1][0]] or
      params['batch_group_count'] > lhs.shape[dn[0][0]]):
    params['batch_group_count'] = 1

  return params


STRUCTURE_RULES[lax.conv_general_dilated_p] = _conv_general_dilated_s
JACOBIAN_RULES[lax.conv_general_dilated_p] = _conv_general_dilated_j
EQN_PARAMS_RULES[lax.conv_general_dilated_p] = _conv_general_dilated_e
def _add_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for addition / subtraction (identity Jacobian blocks)."""
  inval = invals[idx]
  other = invals[1 if idx == 0 else 0]

  if other.ndim == 0:
    # Adding a scalar
    out_trace = tuple(range(inval.ndim))
    out_broadcast = ()

  elif inval.ndim == 0:
    # This array is a scalar
    out_broadcast = tuple(range(other.ndim))
    out_trace = ()

  elif other.ndim == inval.ndim:
    # Adding a broadcastable array.
    out_trace = ()
    out_broadcast = ()
    for i in range(inval.ndim):
      if other.shape[i] in (inval.shape[i], 1):
        # Other array is broadcasted.
        out_trace += (i,)
      elif inval.shape[i] == 1:
        # This array is broadcasted
        out_broadcast += (i,)
      else:
        raise ValueError(inval.shape, other.shape)

  else:
    raise ValueError(inval.ndim, other.ndim)

  return Structure(
      out_trace=out_trace,
      in_trace=out_trace,
      in_trace_idxs=(0, 1),
      out_diagonal=(),
      in_diagonal=(),
      out_broadcast=out_broadcast,
      out_broadcast_idxs=(1 if idx == 0 else 0,)
  )


def _add_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray,
    is_sub: bool
) -> np.ndarray:
  """Dense Jacobian of `add`/`sub`: a (negated for `sub` rhs) broadcast identity."""
  j = np.eye(utils.size_at(invals[idx]), dtype=invals[idx].dtype)
  j = j.reshape(invals[idx].shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  j = np.broadcast_to(j, cts_in.shape + invals[idx].shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  if is_sub and idx == 1:
    j = -j
  return j


STRUCTURE_RULES[lax.add_p] = _add_s
JACOBIAN_RULES[lax.add_p] = functools.partial(_add_j, is_sub=False)

STRUCTURE_RULES[ad.add_jaxvals_p] = _add_s
JACOBIAN_RULES[ad.add_jaxvals_p] = functools.partial(_add_j, is_sub=False)

STRUCTURE_RULES[lax.sub_p] = _add_s
JACOBIAN_RULES[lax.sub_p] = functools.partial(_add_j, is_sub=True)
def _mul_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for elementwise multiplication / division."""
  inval = invals[idx]
  ndim = inval.ndim
  other = invals[1 if idx == 0 else 0]

  out_diagonal = ()
  in_diagonal = ()

  if other.ndim == 0:
    # Multiplication by a scalar
    out_trace = tuple(range(ndim))

  else:
    # Multiplication by a broadcastable array.
    out_trace = ()
    for i in range(ndim):
      if other.shape[i] == 1:
        # Axis `i` is multiplied by a scalar.
        out_trace += (i,)

      else:
        if other.shape[i] == inval.shape[i]:
          # Elementwise product: diagonal (but not constant) along this axis.
          out_diagonal += (i,)
          in_diagonal += ((i, i),)

        elif inval.shape[i] == 1:
          # This array is broadcasted
          pass

        else:
          raise ValueError(inval.shape, other.shape)

  in_trace = out_trace

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=out_diagonal,
      in_diagonal=in_diagonal,
  )


def _mul_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[Union[ShapedArray, np.ndarray]],
    cts_in: ShapedArray,
    is_div: bool
) -> np.ndarray:
  """Dense Jacobian of `mul`/`div`: a diagonal built from the other operand."""
  if is_div and idx != 0:
    raise ValueError(eqn, idx)

  inval = invals[idx]
  if inval.size == 0:
    return np.zeros(cts_in.shape + inval.shape, inval.dtype)  # pytype: disable=unsupported-operands  # always-use-return-annotations

  other = invals[1 if idx == 0 else 0]
  if is_div:
    other = np.ones((), other.dtype) / other

  if inval.ndim == 0:
    return other  # pytype: disable=bad-return-type  # jax-ndarray

  if other.ndim == 0:
    other = np.broadcast_to(other, inval.shape)

  assert other.ndim == inval.ndim == cts_in.ndim

  j = np.broadcast_to(other, cts_in.shape).reshape((-1,))
  j = np.diag(j)
  j = j.reshape(cts_in.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations

  # Sum over input axes of size 1 that were broadcast in the forward pass.
  sum_axes = ()
  for i in range(inval.ndim):
    if inval.shape[i] == 1:
      sum_axes += (cts_in.ndim + i,)

  j = np.sum(j, axis=sum_axes, keepdims=True)
  return j


STRUCTURE_RULES[lax.mul_p] = _mul_s
JACOBIAN_RULES[lax.mul_p] = functools.partial(_mul_j, is_div=False)

STRUCTURE_RULES[lax.div_p] = _mul_s
JACOBIAN_RULES[lax.div_p] = functools.partial(_mul_j, is_div=True)
# N-ARY PRIMITIVES


def _concatenate_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.concatenate`: all non-concatenated axes trace."""
  dimension = eqn.params['dimension']
  out_trace = tuple(i for i in range(cts_in.ndim) if i != dimension)
  in_trace = out_trace

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=tuple(range(len(invals))),
  )


def _concatenate_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.concatenate` w.r.t. operand `idx`.

  An identity block at the operand's position, zeros elsewhere, concatenated
  along the same dimension as the forward op.
  """
  dimension = eqn.params['dimension']

  js = []
  inval = invals[idx]
  for i in range(len(invals)):
    inval_i = invals[i]
    inval_i_shape = tuple(inval_i.shape[k] if k == dimension else
                          inval.shape[k] for k in range(inval.ndim))
    if i == idx:
      j = np.eye(inval.size, dtype=inval.dtype)
    else:
      inval_i_size = onp.prod(inval_i_shape)
      j = np.zeros((inval_i_size, inval.size), inval.dtype)

    j = j.reshape(inval_i_shape + inval.shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations
    js.append(j)

  j = lax.concatenate(js, dimension)
  j = j.reshape(cts_in.shape + inval.shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  return j


STRUCTURE_RULES[lax.concatenate_p] = _concatenate_s
JACOBIAN_RULES[lax.concatenate_p] = _concatenate_j
# UNARY PRIMITIVES


def _rev_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.rev`: axes not reversed are trace axes."""
  dimensions = eqn.params['dimensions']
  in_trace = out_trace = tuple(i for i in range(invals[idx].ndim)
                               if i not in dimensions)

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )


def _rev_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.rev`: a reversed identity."""
  inval = invals[idx]
  j = _eye_like(cts_in, inval)
  j = lax.rev(j, eqn.params['dimensions'])
  return j


STRUCTURE_RULES[lax.rev_p] = _rev_s
JACOBIAN_RULES[lax.rev_p] = _rev_j
def _broadcast_in_dim_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.broadcast_in_dim`.

  Mapped input axes are trace axes; newly created output axes are tiled.
  """
  broadcast_dimensions = eqn.params['broadcast_dimensions']

  out_trace = broadcast_dimensions
  in_trace = tuple(range(invals[idx].ndim))
  out_broadcast = tuple(i for i in range(cts_in.ndim)
                        if i not in broadcast_dimensions)

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
      out_broadcast=out_broadcast,
  )


def _broadcast_in_dim_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.broadcast_in_dim`: a broadcast identity."""
  inval = invals[idx]
  j = np.eye(inval.size, dtype=inval.dtype)
  j = j.reshape(inval.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  j = lax.broadcast_in_dim(
      j,
      cts_in.shape + inval.shape,  # pytype: disable=unsupported-operands  # always-use-return-annotations
      broadcast_dimensions=eqn.params['broadcast_dimensions'] +
                           tuple(range(cts_in.ndim, cts_in.ndim + inval.ndim)))
  return j


def _broadcast_in_dim_e(
    params: Dict[str, Any],
    idx: int,
    trimmed_invals: List[ShapedArray],
    trimmed_cts_in: ShapedArray
) -> Dict[str, Any]:
  """Updates the target `shape` param to the trimmed output shape."""
  # `broadcast_in_dim` is the only primitive JVP where we need to change
  # equation parameters in response to tweaking the inputs/cotangents
  # shapes.
  params['shape'] = trimmed_cts_in.shape
  return params


STRUCTURE_RULES[lax.broadcast_in_dim_p] = _broadcast_in_dim_s
JACOBIAN_RULES[lax.broadcast_in_dim_p] = _broadcast_in_dim_j
EQN_PARAMS_RULES[lax.broadcast_in_dim_p] = _broadcast_in_dim_e
def _reduce_sum_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.reduce_sum`: non-reduced axes are trace axes."""
  axes = eqn.params['axes']
  out_trace = tuple(range(cts_in.ndim))
  in_trace = tuple(i for i in range(invals[idx].ndim) if i not in axes)

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )


def _reduce_sum_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.reduce_sum`: identity tiled over reduced axes."""
  inval = invals[idx]
  j = np.eye(cts_in.size, dtype=inval.dtype)
  j = j.reshape(cts_in.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  j = np.expand_dims(j, tuple(a + cts_in.ndim for a in eqn.params['axes']))
  j = np.broadcast_to(j, cts_in.shape + inval.shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  return j


STRUCTURE_RULES[lax.reduce_sum_p] = _reduce_sum_s
JACOBIAN_RULES[lax.reduce_sum_p] = _reduce_sum_j
def _reduce_window_sum_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.reduce_window_sum`.

  An axis is a trace axis only when the window reduction is a no-op along it
  (unit window, unit stride, no dilation, no padding).
  """
  out_trace = ()
  for i in range(cts_in.ndim):
    if (eqn.params['base_dilation'][i] == 1 and
        eqn.params['padding'][i] == (0, 0) and
        eqn.params['window_dilation'][i] == 1 and
        eqn.params['window_dimensions'][i] == 1 and
        eqn.params['window_strides'][i] == 1):
      out_trace += (i,)

  in_trace = out_trace

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
  )


STRUCTURE_RULES[lax.reduce_window_sum_p] = _reduce_window_sum_s
def _pad_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.pad`: unpadded axes are trace axes."""
  padding_config = eqn.params['padding_config']
  out_trace = tuple(i for i in range(cts_in.ndim)
                    if padding_config[i] == (0, 0, 0))
  in_trace = out_trace

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )


def _pad_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.pad`: a zero-padded identity."""
  padding_config = eqn.params['padding_config']
  inval = invals[idx]

  j = np.eye(inval.size, dtype=inval.dtype)
  j = j.reshape(inval.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  # Pad only the output (leading) axes; input axes get no-op padding entries.
  for _ in range(inval.ndim):
    padding_config += ((0, 0, 0),)
  j = lax.pad(j, np.zeros((), j.dtype), padding_config)
  return j


STRUCTURE_RULES[lax.pad_p] = _pad_s
JACOBIAN_RULES[lax.pad_p] = _pad_j
def _reshape_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.reshape` (in input-shape coordinates).

  Note: `get_structure` special-cases `reshape` so that `out_trace` here
  refers to the *input* shape rather than the reshaped output.
  """
  out_trace = tuple(range(invals[idx].ndim))
  if eqn.params['dimensions'] is None:
    in_trace = out_trace
  else:
    in_trace = tuple(eqn.params['dimensions'].index(i) for i in out_trace)

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )


def _reshape_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.reshape`: a (possibly transposed) identity."""
  inval = invals[idx]
  j = _eye_like(inval, inval)
  j = j.reshape(inval.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  inval_dims = tuple(i + inval.ndim for i in range(inval.ndim))
  if eqn.params['dimensions'] is not None:
    j = lax.transpose(j, eqn.params['dimensions'] + inval_dims)
  j = j.reshape(inval.shape + inval.shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  return j


def _reshape_e(
    params: Dict[str, Any],
    idx: int,
    trimmed_invals: List[ShapedArray],
    trimmed_cts_in: ShapedArray
) -> Dict[str, Any]:
  """Updates `new_sizes` to match the trimmed input shape."""
  # Hack for more efficient `reshape` structure rule.
  params['new_sizes'] = trimmed_invals[idx].shape
  return params


STRUCTURE_RULES[lax.reshape_p] = _reshape_s
JACOBIAN_RULES[lax.reshape_p] = _reshape_j
EQN_PARAMS_RULES[lax.reshape_p] = _reshape_e
def _eye_s(
    eqn: Optional[JaxprEqn],
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Use this for elementwise-linear in `p` primitives `y(p, x)`.

  Precisely, require that `y(p, x)_k(i) = g(x)(p_i)` for some function `g(x)`
  and an index bijection `k: i -> j`.

  Note: multiplication doesn't satisfy this, since `y(p, x)_i = g(p_i, x_i)`.

  In this case the derivative matrix `dy/dp` is a constant-diagonal matrix, and
  all input-output axes can be collapsed.
  """
  out_trace = tuple(range(cts_in.ndim))
  in_trace = tuple(range(invals[idx].ndim))

  return Structure(
      out_trace=out_trace,
      in_trace=in_trace,
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )


def _eye_j(
    eqn: Optional[JaxprEqn],
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of an identity-like primitive: a reshaped identity."""
  j = _eye_like(cts_in, invals[idx])
  return j


# Identity
STRUCTURE_RULES[None] = _eye_s
JACOBIAN_RULES[None] = _eye_j
def _neg_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `lax.neg`: a negated identity."""
  j = _eye_like(cts_in, invals[idx])
  return -j


STRUCTURE_RULES[lax.neg_p] = _eye_s
JACOBIAN_RULES[lax.neg_p] = _neg_j


def _zeros_like_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Dense Jacobian of `zeros_like`: identically zero."""
  return np.zeros(cts_in.shape + invals[idx].shape, cts_in.dtype)  # pytype: disable=unsupported-operands  # always-use-return-annotations


STRUCTURE_RULES[jax.ad.zeros_like_p] = _eye_s
JACOBIAN_RULES[jax.ad.zeros_like_p] = _zeros_like_j
def _transpose_s(
eqn: JaxprEqn,
idx: int,
invals: List[ShapedArray],
cts_in: ShapedArray
) -> Structure:
in_trace = tuple(range(cts_in.ndim))
out_trace = tuple(eqn.params['permutation'].index(i) for i in in_trace)
return Structure(
out_trace=out_trace,
in_trace=in_trace,
in_trace_idxs=(idx,),
out_diagonal=(),
in_diagonal=(),
)
def _transpose_j(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> np.ndarray:
  """Jacobian rule for `lax.transpose_p`: a permuted identity matrix."""
  inval = invals[idx]
  # Start from the identity `dy/dp`, viewed as a rank-`2 * ndim` tensor.
  j = _eye_like(cts_in, inval).reshape(inval.shape * 2)  # pytype: disable=unsupported-operands  # always-use-return-annotations
  # Apply `eqn`'s permutation to the leading (output) axes; the trailing
  # (input) axes keep their original order.
  input_axes = tuple(cts_in.ndim + a for a in range(cts_in.ndim))
  j = lax.transpose(j, eqn.params['permutation'] + input_axes)
  return j.reshape(cts_in.shape + inval.shape)  # pytype: disable=unsupported-operands  # always-use-return-annotations

STRUCTURE_RULES[lax.transpose_p] = _transpose_s
JACOBIAN_RULES[lax.transpose_p] = _transpose_j
def _squeeze_s(
    eqn: JaxprEqn,
    idx: int,
    invals: List[ShapedArray],
    cts_in: ShapedArray
) -> Structure:
  """Structure rule for `lax.squeeze_p`.

  Squeezed (size-1) input axes vanish from the output; every remaining input
  axis corresponds one-to-one to an output axis, so all of them are traced.
  """
  squeezed = eqn.params['dimensions']
  return Structure(
      out_trace=tuple(range(cts_in.ndim)),
      in_trace=tuple(axis for axis in range(invals[idx].ndim)
                     if axis not in squeezed),
      in_trace_idxs=(idx,),
      out_diagonal=(),
      in_diagonal=(),
  )

STRUCTURE_RULES[lax.squeeze_p] = _squeeze_s
JACOBIAN_RULES[lax.squeeze_p] = _eye_j
# These primitives (dtype conversion, device placement, copies) reuse the
# identity structure / Jacobian rules.
STRUCTURE_RULES[lax.convert_element_type_p] = _eye_s
JACOBIAN_RULES[lax.convert_element_type_p] = _eye_j
STRUCTURE_RULES[lax.device_put_p] = _eye_s
JACOBIAN_RULES[lax.device_put_p] = _eye_j
copy_p = jax.lax.copy_p
STRUCTURE_RULES[copy_p] = _eye_s
JACOBIAN_RULES[copy_p] = _eye_j
| 31,709 | 27.3125 | 138 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/utils/dataclasses.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining dataclasses that can be used with jax transformations.
This code was copied and adapted from https://github.com/google/flax/struct.py.
Accessed on 03/23/2020.
"""
import dataclasses
import functools
from typing import Any, Dict, Tuple
import jax
def dataclass(clz):
  """Decorator making a class usable with jax functional transformations.

  The returned class is a frozen (immutable) `dataclasses.dataclass` that is
  also registered as a jax pytree node, so instances can safely be passed
  through transformations such as `jax.jit` and `jax.grad`. Fields declared
  with `field(pytree_node=False)` are carried as static auxiliary data; all
  other `__init__` fields become pytree children. To "mutate" an instance,
  use the attached `replace` method, e.g. `x = x.replace(array=new_array)`.

  Args:
    clz: the class that will be transformed by the decorator.

  Returns:
    The new frozen, pytree-registered class.
  """
  data_clz = dataclasses.dataclass(frozen=True)(clz)

  # Partition `__init__` fields into pytree children vs. static metadata.
  # Fields with `init=False` metadata are excluded entirely: they are
  # recomputed on reconstruction rather than carried through (un)flattening.
  data_fields = []
  meta_fields = []
  for name, field_info in data_clz.__dataclass_fields__.items():
    if not field_info.metadata.get('init', True):
      continue
    is_child = field_info.metadata.get('pytree_node', True)
    (data_fields if is_child else meta_fields).append(name)

  def flatten(obj):
    children = tuple(getattr(obj, name) for name in data_fields)
    aux = tuple(getattr(obj, name) for name in meta_fields)
    return children, aux

  def unflatten(aux, children):
    kwargs = dict(zip(meta_fields, aux))
    kwargs.update(zip(data_fields, children))
    return data_clz(**kwargs)

  jax.tree_util.register_pytree_node(data_clz, flatten, unflatten)

  @functools.wraps(
      dataclasses.replace,
      assigned=('__module__', '__name__', '__qualname__', '__annotations__'))
  def replace(self: data_clz, **kwargs) -> data_clz:
    """Instance method alternative to `dataclasses.replace`."""
    return dataclasses.replace(self, **kwargs)

  @functools.wraps(
      dataclasses.asdict,
      assigned=('__module__', '__name__', '__qualname__', '__annotations__'))
  def asdict(self: data_clz) -> Dict[str, Any]:
    """Instance method alternative to `dataclasses.asdict`."""
    return {f.name: getattr(self, f.name) for f in dataclasses.fields(self)}

  @functools.wraps(
      dataclasses.astuple,
      assigned=('__module__', '__name__', '__qualname__', '__annotations__'))
  def astuple(self: data_clz) -> Tuple[Any, ...]:
    """Instance method alternative to `dataclasses.astuple`."""
    return tuple(getattr(self, f.name) for f in dataclasses.fields(self))

  data_clz.replace = replace
  data_clz.asdict = asdict
  data_clz.astuple = astuple
  return data_clz
def field(pytree_node: bool = True, **kwargs):
  """Like `dataclasses.field`, with pytree metadata attached.

  Args:
    pytree_node: whether the field holds pytree leaves (`True`) or static
      auxiliary data (`False`); recorded under `metadata['pytree_node']`.
    **kwargs: forwarded to `dataclasses.field`. An `init` keyword, if given,
      is also mirrored into the metadata so that the `dataclass` decorator
      can exclude non-`init` fields from (un)flattening.

  Returns:
    A `dataclasses.Field` carrying the combined metadata.
  """
  metadata = {'pytree_node': pytree_node}
  try:
    metadata['init'] = kwargs['init']
  except KeyError:
    pass
  return dataclasses.field(metadata=metadata, **kwargs)
| 4,106 | 31.338583 | 80 | py |
neural-tangents | neural-tangents-main/neural_tangents/_src/utils/kernel.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class with infinite-width NTK and NNGP :class:`jax.numpy.ndarray` fields."""
import operator as op
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from . import dataclasses
from . import utils
from jax import lax
import jax.numpy as np
@dataclasses.dataclass
class Kernel:
  """Dataclass containing information about the NTK and NNGP of a model.
  Attributes:
    nngp:
      covariance between the first and second batches (NNGP). A `np.ndarray` of
      shape
      `(batch_size_1, batch_size_2, height, [height,], width, [width,], ...))`,
      where exact shape depends on `diagonal_spatial`.
    ntk:
      the neural tangent kernel (NTK). `np.ndarray` of same shape as `nngp`.
    cov1:
      covariance of the first batch of inputs. A `np.ndarray` with shape
      `(batch_size_1, [batch_size_1,] height, [height,], width, [width,], ...)`
      where exact shape depends on `diagonal_batch` and `diagonal_spatial`.
    cov2:
      optional covariance of the second batch of inputs. A `np.ndarray` with
      shape
      `(batch_size_2, [batch_size_2,] height, [height,], width, [width,], ...)`
      where the exact shape depends on `diagonal_batch` and `diagonal_spatial`.
    x1_is_x2:
      a boolean specifying whether `x1` and `x2` are the same.
    is_gaussian:
      a boolean, specifying whether the output features or channels of the layer
      / NN function (returning this `Kernel` as the `kernel_fn`) are i.i.d.
      Gaussian with covariance `nngp`, conditioned on fixed inputs to the layer
      and i.i.d. Gaussian weights and biases of the layer. For example, passing
      an input through a CNN layer with i.i.d. Gaussian weights and biases
      produces i.i.d. Gaussian random variables along the channel dimension,
      while passing an input through a nonlinearity does not.
    is_reversed:
      a boolean specifying whether the covariance matrices `nngp`, `cov1`,
      `cov2`, and `ntk` have the ordering of spatial dimensions reversed.
      Ignored unless `diagonal_spatial` is `False`. Used internally to avoid
      self-cancelling transpositions in a sequence of CNN layers that flip the
      order of kernel spatial dimensions.
    is_input:
      a boolean specifying whether the current layer is the input layer and it
      is used to avoid applying dropout to the input layer.
    diagonal_batch:
      a boolean specifying whether `cov1` and `cov2` store only the diagonal of
      the sample-sample covariance (`diagonal_batch == True`,
      `cov1.shape == (batch_size_1, ...)`), or the full covariance
      (`diagonal_batch == False`,
      `cov1.shape == (batch_size_1, batch_size_1, ...)`). Defaults to `True` as
      no current layers require the full covariance.
    diagonal_spatial:
      a boolean specifying whether all (`cov1`, `ntk`, etc.) covariance matrices
      store only the diagonals of the location-location covariances
      (`diagonal_spatial == True`,
      `nngp.shape == (batch_size_1, batch_size_2, height, width, depth, ...)`),
      or the full covariance (`diagonal_spatial == False`, `nngp.shape ==
      (batch_size_1, batch_size_2, height, height, width, width, depth, depth,
      ...)`).
      Defaults to `False`, but is set to `True` if the
      output top-layer covariance depends only on the diagonals (e.g. when a CNN
      network has no pooling layers and `Flatten` on top).
    shape1:
      a tuple specifying the shape of the random variable in the first batch of
      inputs. These have covariance `cov1` and covariance with the second batch
      of inputs given by `nngp`.
    shape2:
      a tuple specifying the shape of the random variable in the second batch of
      inputs. These have covariance `cov2` and covariance with the first batch
      of inputs given by `nngp`.
    batch_axis:
      the batch axis of the activations.
    channel_axis:
      channel axis of the activations (taken to infinity).
    mask1:
      an optional boolean `np.ndarray` with a shape broadcastable to `shape1`
      (and the same number of dimensions). `True` stands for the input being
      masked at that position, while `False` means the input is visible. For
      example, if `shape1 == (5, 32, 32, 3)` (a batch of 5 `NHWC` CIFAR10
      images), a `mask1` of shape `(5, 1, 32, 1)` means different images can
      have different blocked columns (`H` and `C` dimensions are always either
      both blocked or unblocked). `None` means no masking.
    mask2:
      same as `mask1`, but for the second batch of inputs.
  """
  nngp: np.ndarray
  ntk: Optional[np.ndarray]
  cov1: np.ndarray
  cov2: Optional[np.ndarray]
  x1_is_x2: np.ndarray
  is_gaussian: bool = dataclasses.field(pytree_node=False)
  is_reversed: bool = dataclasses.field(pytree_node=False)
  is_input: bool = dataclasses.field(pytree_node=False)
  diagonal_batch: bool = dataclasses.field(pytree_node=False)
  diagonal_spatial: bool = dataclasses.field(pytree_node=False)
  shape1: Tuple[int, ...] = dataclasses.field(pytree_node=False)
  shape2: Tuple[int, ...] = dataclasses.field(pytree_node=False)
  batch_axis: int = dataclasses.field(pytree_node=False)
  channel_axis: int = dataclasses.field(pytree_node=False)
  mask1: Optional[np.ndarray] = None
  mask2: Optional[np.ndarray] = None
  # The `...` placeholders below only declare types for static analysis; the
  # actual methods are attached by the local `dataclasses.dataclass` decorator
  # (imported from `. import dataclasses` above).
  replace = ... # type: Callable[..., 'Kernel']
  asdict = ... # type: Callable[[], Dict[str, Any]]
  astuple = ... # type: Callable[[], Tuple[Any, ...]]
  def slice(self, n1_slice: slice, n2_slice: slice) -> 'Kernel':
    """Returns a `Kernel` with batch axes sliced by `n1_slice` / `n2_slice`."""
    cov1 = self.cov1[n1_slice]
    # When `cov2` is absent, the second batch is taken from `cov1`.
    cov2 = self.cov1[n2_slice] if self.cov2 is None else self.cov2[n2_slice]
    ntk = self.ntk
    mask1 = None if self.mask1 is None else self.mask1[n1_slice]
    mask2 = None if self.mask2 is None else self.mask2[n2_slice]
    # A `None` or 0-dimensional `ntk` is passed through unchanged.
    return self.replace(
        cov1=cov1,
        nngp=self.nngp[n1_slice, n2_slice],
        cov2=cov2,
        ntk=ntk if ntk is None or ntk.ndim == 0 else ntk[n1_slice, n2_slice],
        shape1=(cov1.shape[0],) + self.shape1[1:],
        shape2=(cov2.shape[0],) + self.shape2[1:],
        mask1=mask1,
        mask2=mask2)
  def reverse(self) -> 'Kernel':
    """Reverse the order of spatial axes in the covariance matrices.
    Returns:
      A `Kernel` object with spatial axes order flipped in
      all covariance matrices. For example, if `kernel.nngp` has shape
      `(batch_size_1, batch_size_2, H, H, W, W, D, D, ...)`, then
      `reverse(kernels).nngp` has shape
      `(batch_size_1, batch_size_2, ..., D, D, W, W, H, H)`.
    """
    # `cov1` / `cov2` carry 1 or 2 leading batch axes depending on
    # `diagonal_batch`; `nngp` / `ntk` always carry 2.
    batch_ndim = 1 if self.diagonal_batch else 2
    cov1 = utils.reverse_zipped(self.cov1, batch_ndim)
    cov2 = utils.reverse_zipped(self.cov2, batch_ndim)
    nngp = utils.reverse_zipped(self.nngp, 2)
    ntk = utils.reverse_zipped(self.ntk, 2)
    return self.replace(cov1=cov1,
                        nngp=nngp,
                        cov2=cov2,
                        ntk=ntk,
                        is_reversed=not self.is_reversed)
  def transpose(self, axes: Optional[Sequence[int]] = None) -> 'Kernel':
    """Permute spatial dimensions of the `Kernel` according to `axes`.
    Follows
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html
    Note that `axes` apply only to spatial axes, batch axes are ignored and
    remain leading in all covariance arrays, and channel axes are not present
    in a `Kernel` object. If the covariance array is of shape
    `(batch_size, X, X, Y, Y)`, and `axes == (0, 1)`, resulting array is of
    shape `(batch_size, Y, Y, X, X)`.
    """
    if axes is None:
      # Default: identity permutation over all spatial axes (all axes of
      # `shape1` except batch and channel).
      axes = tuple(range(len(self.shape1) - 2))
    def permute(mat: Optional[np.ndarray],
                batch_ndim: int) -> Optional[np.ndarray]:
      if mat is not None:
        _axes = tuple(batch_ndim + a for a in axes)
        if not self.diagonal_spatial:
          # Each spatial axis is stored as a zipped pair of array axes.
          _axes = tuple(j for a in _axes
                        for j in (2 * a - batch_ndim,
                                  2 * a - batch_ndim + 1))
        _axes = tuple(range(batch_ndim)) + _axes
        return np.transpose(mat, _axes)
      return mat
    cov1 = permute(self.cov1, 1 if self.diagonal_batch else 2)
    cov2 = permute(self.cov2, 1 if self.diagonal_batch else 2)
    nngp = permute(self.nngp, 2)
    ntk = permute(self.ntk, 2)
    return self.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
  def mask(self,
           mask1: Optional[np.ndarray],
           mask2: Optional[np.ndarray]) -> 'Kernel':
    """Mask all covariance matrices according to `mask1`, `mask2`."""
    mask11, mask12, mask22 = self._get_mask_prods(mask1, mask2)
    cov1 = utils.mask(self.cov1, mask11)
    cov2 = utils.mask(self.cov2, mask22)
    nngp = utils.mask(self.nngp, mask12)
    ntk = utils.mask(self.ntk, mask12)
    return self.replace(cov1=cov1,
                        nngp=nngp,
                        cov2=cov2,
                        ntk=ntk,
                        mask1=mask1,
                        mask2=mask2)
  def _get_mask_prods(
      self,
      mask1: Optional[np.ndarray],
      mask2: Optional[np.ndarray]
  ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]]:
    """Gets outer products of `mask1, mask1`, `mask1, mask2`, `mask2, mask2`."""
    def get_mask_prod(m1, m2, batch_ndim):
      if m1 is None and m2 is None:
        return None
      def reshape(m):
        if m is not None:
          if m.shape[self.channel_axis] != 1:
            raise NotImplementedError(
                f'Different channel-wise masks are not supported for '
                f'infinite-width layers now (got `mask.shape == {m.shape}). '
                f'Please describe your use case at '
                f'https://github.com/google/neural-tangents/issues/new')
          # Move the batch axis first and strip the size-1 channel axis.
          m = np.squeeze(np.moveaxis(m, (self.batch_axis, self.channel_axis),
                                     (0, -1)), -1)
          if self.is_reversed:
            m = np.moveaxis(m, range(1, m.ndim), range(m.ndim - 1, 0, -1))
        return m
      m1, m2 = reshape(m1), reshape(m2)
      start_axis = 2 - batch_ndim
      end_axis = 1 if self.diagonal_spatial else m1.ndim
      mask = utils.outer_prod(m1, m2, start_axis, end_axis, op.or_)
      return mask
    mask11 = get_mask_prod(mask1, mask1, 1 if self.diagonal_batch else 2)
    mask22 = (get_mask_prod(mask2, mask2, 1 if self.diagonal_batch else 2)
              if mask2 is not None else mask11)
    mask12 = get_mask_prod(mask1, mask2, 2)
    return mask11, mask12, mask22
  def dot_general(
      self,
      other1: Optional[np.ndarray],
      other2: Optional[np.ndarray],
      is_lhs: bool,
      dimension_numbers: lax.DotDimensionNumbers
  ) -> 'Kernel':
    """Covariances of :obj:`jax.lax.dot_general` of `x1/2` with `other1/2`."""
    if other1 is None and other2 is None:
      return self
    if other1 is not None and other2 is not None:
      if other1.ndim != other2.ndim:
        raise NotImplementedError(
            f'Factors 1/2 with different dimensionality not implemented, got '
            f'{other1.ndim} and {other2.ndim}.')
    # `cs` are contracting, `bs` are batch dimensions, on the input vs. the
    # `other` factor side, depending on which operand of the dot is which.
    if is_lhs:
      (other_cs, input_cs), (other_bs, input_bs) = dimension_numbers
    else:
      (input_cs, other_cs), (input_bs, other_bs) = dimension_numbers
    n_input = len(self.shape1)
    if other1 is not None:
      n_other = other1.ndim
    elif other2 is not None:
      n_other = other2.ndim
    else:
      raise ValueError(other1, other2)
    # Normalize possibly-negative axis indices.
    input_cs = utils.mod(input_cs, n_input)
    input_bs = utils.mod(input_bs, n_input)
    other_cs = utils.mod(other_cs, n_other)
    other_bs = utils.mod(other_bs, n_other)
    other_dims = other_bs + other_cs
    input_dims = input_bs + input_cs
    # Maps an input-array axis `i` to the corresponding axis of a kernel
    # matrix with `batch_ndim` leading batch axes (`is_left` selects which of
    # the zipped pair of spatial axes to use when `diagonal_spatial is False`).
    def to_kernel_dim(i: int, batch_ndim: int, is_left: bool) -> int:
      if i == self.batch_axis:
        i = 0 if (is_left or batch_ndim == 1) else 1
      elif i == self.channel_axis:
        raise ValueError(f'Batch or contracting dimension {i} cannot be equal '
                         f'to `channel_axis`.')
      else:
        i -= int(i > self.batch_axis) + int(i > self.channel_axis)
        i = batch_ndim + (1 if self.diagonal_spatial else 2) * i
        i += not is_left and not self.diagonal_spatial
      return i
    # `np.einsum` axis ids for an `other` factor: fresh negative ids for free
    # axes, kernel-matrix axis ids for batch / contracting axes.
    def get_other_dims(batch_ndim: int, is_left: bool) -> List[int]:
      dims = [-i - 1 - (0 if is_left or self.diagonal_spatial else n_other)
              for i in range(n_other)]
      for i_inputs, i_other in zip(input_dims, other_dims):
        dims[i_other] = to_kernel_dim(i_inputs, batch_ndim, is_left)
      return dims
    # Kernel-matrix axes that survive the contraction.
    def get_mat_non_c_dims(batch_ndim: int) -> List[int]:
      input_non_c_dims = input_bs + [
          i for i in range(n_input)
          if i not in input_cs + input_bs + [self.channel_axis]
      ]
      # Batch axes are always leading in `mat`.
      if self.batch_axis in input_non_c_dims:
        input_non_c_dims.remove(self.batch_axis)
        input_non_c_dims.insert(0, self.batch_axis)
      mat_non_c_dims = []
      for i in input_non_c_dims:
        left = to_kernel_dim(i, batch_ndim, True)
        right = to_kernel_dim(i, batch_ndim, False)
        mat_non_c_dims += [left] if left == right else [left, right]
      return mat_non_c_dims
    # Free axes of `other`, zipped into pairs when full spatial covariances
    # are stored (`diagonal_spatial is False`).
    def get_other_non_c_dims() -> List[int]:
      other_non_c_dims = [-i - 1 for i in range(n_other) if i not in other_dims]
      if not self.diagonal_spatial:
        other_non_c_dims = list(utils.zip_flat(
            other_non_c_dims,
            [-i - 1 - n_other for i in range(n_other) if i not in other_dims]))
      return other_non_c_dims
    # Output axis order of the einsum: surviving kernel axes, with the free
    # axes of `other` spliced in after the leading `n_b` batch-like axes.
    def get_out_dims(batch_ndim: int) -> List[int]:
      mat_non_c_dims = get_mat_non_c_dims(batch_ndim)
      other_non_c_dims = get_other_non_c_dims()
      n_b_spatial = len(input_bs) - (1 if self.batch_axis in input_bs else 0)
      n_b = (len(mat_non_c_dims) if not is_lhs else
             (((0 if self.batch_axis in input_cs else batch_ndim) +
               (1 if self.diagonal_spatial else 2) * n_b_spatial)))
      return mat_non_c_dims[:n_b] + other_non_c_dims + mat_non_c_dims[n_b:]
    def dot(mat: Optional[np.ndarray],
            batch_ndim: int,
            other1: Optional[np.ndarray] = None,
            other2: Optional[np.ndarray] = None) -> Optional[np.ndarray]:
      # `None` / 0-dimensional matrices (e.g. a scalar `ntk` placeholder) and
      # absent factors pass through unchanged.
      if mat is None or mat.ndim == 0 or other1 is None and other2 is None:
        return mat
      operands = ()
      if other1 is not None:
        other1_dims = get_other_dims(batch_ndim, True)
        operands += (other1, other1_dims)
      mat_dims = list(range(mat.ndim))
      if self.is_reversed:
        mat_dims = utils.reverse_zipped(mat_dims, batch_ndim)
      operands += (mat, mat_dims)
      if other2 is not None:
        other2_dims = get_other_dims(batch_ndim, False)
        operands += (other2, other2_dims)
      return np.einsum(*operands, get_out_dims(batch_ndim), optimize=True)
    cov1 = dot(self.cov1, 1 if self.diagonal_batch else 2, other1, other1)
    cov2 = dot(self.cov2, 1 if self.diagonal_batch else 2, other2, other2)
    nngp = dot(self.nngp, 2, other1, other2)
    ntk = dot(self.ntk, 2, other1, other2)
    lhs_ndim = n_other if is_lhs else None
    return self.replace(
        cov1=cov1,
        nngp=nngp,
        cov2=cov2,
        ntk=ntk,
        is_reversed=False,
        batch_axis=utils.axis_after_dot(self.batch_axis, input_cs,
                                        input_bs, lhs_ndim),
        channel_axis=utils.axis_after_dot(self.channel_axis, input_cs,
                                          input_bs, lhs_ndim)
    )
  def __mul__(self, other: float) -> 'Kernel':
    # Scaling the function by `other` scales all second moments by `other**2`.
    var = other**2
    return self.replace(cov1=var * self.cov1,
                        nngp=var * self.nngp,
                        cov2=None if self.cov2 is None else var * self.cov2,
                        ntk=None if self.ntk is None else var * self.ntk)
  __rmul__ = __mul__
  def __add__(self, other: float) -> 'Kernel':
    # Adding a constant shifts the covariances (but not the NTK) by
    # `other**2`; since `(-c)**2 == c**2`, subtraction acts identically,
    # hence `__sub__ = __add__` below.
    var = other**2
    return self.replace(cov1=var + self.cov1,
                        nngp=var + self.nngp,
                        cov2=None if self.cov2 is None else var + self.cov2)
  __sub__ = __add__
  def __truediv__(self, other: float) -> 'Kernel':
    return self.__mul__(1. / other)
  def __neg__(self) -> 'Kernel':
    # Negating the function leaves all second moments unchanged.
    return self
  __pos__ = __neg__
  def __getitem__(self, idx: utils.SliceType) -> 'Kernel':
    """Index into batch and spatial axes of all covariance matrices."""
    idx = utils.canonicalize_idx(idx, len(self.shape1))
    channel_idx = idx[self.channel_axis]
    batch_idx = idx[self.batch_axis]
    # Not allowing to index the channel axis.
    if channel_idx != slice(None):
      raise NotImplementedError(
          f'Indexing into the (infinite) channel axis {self.channel_axis} not '
          f'supported.'
      )
    # Removing the batch.
    if isinstance(batch_idx, int):
      raise NotImplementedError(
          f'Indexing an axis with an integer index (e.g. `0` vs `(0,)` removes '
          f'the respective axis. Neural Tangents requires there to always be a '
          f'batch axis ({self.batch_axis}), so it cannot be indexed with '
          f'integers (please use tuples or `slice` instead).'
      )
    spatial_idx = tuple(s for i, s in enumerate(idx) if i not in
                        (self.batch_axis, self.channel_axis))
    if self.is_reversed:
      spatial_idx = spatial_idx[::-1]
    if not self.diagonal_spatial:
      # Full spatial covariances store each spatial axis as a zipped pair.
      spatial_idx = utils.double_tuple(spatial_idx)
    nngp_batch_slice = (batch_idx, batch_idx)
    cov_batch_slice = (batch_idx,) if self.diagonal_batch else (batch_idx,) * 2
    nngp_slice = nngp_batch_slice + spatial_idx
    cov_slice = cov_batch_slice + spatial_idx
    nngp = self.nngp[nngp_slice]
    ntk = (self.ntk if (self.ntk is None or self.ntk.ndim == 0) else  # pytype: disable=attribute-error
           self.ntk[nngp_slice])
    cov1 = self.cov1[cov_slice]
    cov2 = None if self.cov2 is None else self.cov2[cov_slice]
    # Axes may shift if some indices are integers (and not tuples / slices).
    channel_axis = self.channel_axis
    batch_axis = self.batch_axis
    for i, s in reversed(list(enumerate(idx))):
      if isinstance(s, int):
        if i < channel_axis:
          channel_axis -= 1
        if i < batch_axis:
          batch_axis -= 1
    return self.replace(
        nngp=nngp,
        ntk=ntk,
        cov1=cov1,
        cov2=cov2,
        channel_axis=channel_axis,
        batch_axis=batch_axis,
        shape1=utils.slice_shape(self.shape1, idx),
        shape2=utils.slice_shape(self.shape2, idx),
        mask1=None if self.mask1 is None else self.mask1[idx],
        mask2=None if self.mask2 is None else self.mask2[idx],
    )
| 19,210 | 36.742633 | 103 | py |
neural-tangents | neural-tangents-main/neural_tangents/experimental/empirical_tf/empirical.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental prototype of empirical NTK computation in Tensorflow.
This module is applicable to :class:`tf.Module`, :class:`tf.keras.Model`, or
:obj:`tf.function` functions, subject to some conditions (see docstring of
:obj:`empirical_ntk_fn_tf`).
The kernel function follows the API of :obj:`neural_tangents.empirical_ntk_fn`.
Please read the respective docstring for more details.
.. warning::
This module currently appears to have long compile times (but OK runtime),
is prone to triggering XLA errors, and does not distinguish between trainable
and non-trainable parameters of the model.
For details about the empirical (finite width) NTK computation, please see
"`Fast Finite Width Neural Tangent Kernel <https://arxiv.org/abs/2206.08720>`_".
Example:
>>> import tensorflow as tf
>>> from tensorflow.keras import layers
>>> import neural_tangents as nt
>>> #
>>> x_train = tf.random.normal((20, 32, 32, 3))
>>> x_test = tf.random.normal((5, 32, 32, 3))
>>> #
>>> # A CNN.
>>> f = tf.keras.Sequential()
>>> f.add(layers.Conv2D(32, (3, 3), activation='relu',
>>> input_shape=x_train.shape[1:]))
>>> f.add(layers.Conv2D(32, (3, 3), activation='relu'))
>>> f.add(layers.Conv2D(32, (3, 3)))
>>> f.add(layers.Flatten())
>>> f.add(layers.Dense(10))
>>> #
>>> f.build((None, *x_train.shape[1:]))
>>> _, params = nt.experimental.get_apply_fn_and_params(f)
>>> #
>>> # Default setting: reducing over logits (default `trace_axes=(-1,)`;
>>> # pass `vmap_axes=0` because the network is iid along the batch axis, no
>>> # BatchNorm.
>>> kernel_fn = nt.experimental.empirical_ntk_fn_tf(f, vmap_axes=0)
>>> #
>>> # (5, 20) tf.Tensor test-train NTK
>>> nngp_test_train = kernel_fn(x_test, x_train, params)
>>> ntk_test_train = kernel_fn(x_test, x_train, params)
>>> #
>>> # Full kernel: not reducing over logits.
>>> kernel_fn = nt.experimental.empirical_ntk_fn_tf(f, trace_axes=(),
>>> vmap_axes=0)
>>> #
>>> # (5, 20, 10, 10) tf.Tensor test-train NTK.
>>> k_test_train = kernel_fn(x_test, x_train, params)
>>> #
>>> # An FCN
>>> f = tf.keras.Sequential()
>>> f.add(layers.Flatten())
>>> f.add(layers.Dense(1024, activation='relu'))
>>> f.add(layers.Dense(1024, activation='relu'))
>>> f.add(layers.Dense(10))
>>> #
>>> f.build((None, *x_train.shape[1:]))
>>> _, params = nt.experimental.get_apply_fn_and_params(f)
>>> #
>>> # Use ntk-vector products since the network has many parameters
>>> # relative to the cost of forward pass.
>>> ntk_fn = nt.experimental.empirical_ntk_fn_tf(f, vmap_axes=0,
>>> implementation=2)
>>> #
>>> # (5, 5) tf.Tensor test-test NTK
>>> ntk_test_test = ntk_fn(x_test, None, params)
>>> #
>>> # Compute only NTK diagonal variances:
>>> ntk_fn = nt.experimental.empirical_ntk_fn_tf(f, diagonal_axes=(0,))
>>> #
>>> # (20,) tf.Tensor train-train NTK diagonal
>>> ntk_train_train_diag = ntk_fn(x_train, None, params)
"""
from typing import Callable, Optional, Union
import warnings
from jax.experimental import jax2tf
from neural_tangents._src.empirical import NtkImplementation, empirical_ntk_fn, DEFAULT_NTK_IMPLEMENTATION, _DEFAULT_NTK_FWD, _DEFAULT_NTK_J_RULES, _DEFAULT_NTK_S_RULES
from neural_tangents._src.utils.typing import Axes, PyTree, VMapAxes
import tensorflow as tf
import tf2jax
def empirical_ntk_fn_tf(
    f: Union[tf.Module, tf.types.experimental.GenericFunction],
    trace_axes: Axes = (-1,),
    diagonal_axes: Axes = (),
    vmap_axes: VMapAxes = None,
    implementation: Union[NtkImplementation, int] = DEFAULT_NTK_IMPLEMENTATION,
    _j_rules: bool = _DEFAULT_NTK_J_RULES,
    _s_rules: bool = _DEFAULT_NTK_S_RULES,
    _fwd: Optional[bool] = _DEFAULT_NTK_FWD,
) -> Callable[..., PyTree]:
  r"""Returns a function computing a single empirical NTK sample of `f`.

  A Tensorflow counterpart of :obj:`neural_tangents.empirical_ntk_fn`,
  implemented via a TF->JAX->TF roundtrip using `tf2jax` and `jax2tf`. All
  keyword arguments (`trace_axes`, `diagonal_axes`, `vmap_axes`,
  `implementation`, and the internal `_j_rules` / `_s_rules` / `_fwd`
  debugging flags) have exactly the same meaning as in
  :obj:`neural_tangents.empirical_ntk_fn` — see its docstring for details.

  .. warning::
    This function is experimental and risks returning wrong results or
    performing slowly. It appears to have very long compile times (but OK
    runtime), is prone to triggering XLA errors, and does not distinguish
    between trainable and non-trainable parameters of the model.

  TODO(romann): support division between trainable and non-trainable variables.
  TODO(romann): investigate slow compile times.

  Args:
    f:
      :class:`tf.Module` or :obj:`tf.function` whose NTK we are computing. A
      :obj:`tf.function` must have the signature `f(params, x)` and a known
      `input_signature`; a :class:`tf.Module` must be a
      :class:`tf.keras.Model` or be callable, with a known `input_shape`.
    trace_axes:
      output axes to trace the output kernel over; see
      :obj:`neural_tangents.empirical_ntk_fn`.
    diagonal_axes:
      output axes to diagonalize the output kernel over; see
      :obj:`neural_tangents.empirical_ntk_fn`.
    vmap_axes:
      a triple of `(in_axes, out_axes, kwargs_axes)` certifying that `f` can
      be evaluated as a `vmap` over these axes, enabling much more efficient
      Jacobians; see :obj:`neural_tangents.empirical_ntk_fn`.
    implementation:
      an :class:`NtkImplementation` value (or an :class:`int` `0`, `1`, `2`,
      or `3`); see the :class:`NtkImplementation` docstring.
    _j_rules:
      internal debugging parameter; see
      :obj:`neural_tangents.empirical_ntk_fn`.
    _s_rules:
      internal debugging parameter; see
      :obj:`neural_tangents.empirical_ntk_fn`.
    _fwd:
      internal debugging parameter; see
      :obj:`neural_tangents.empirical_ntk_fn`.

  Returns:
    A function `ntk_fn` that computes the empirical ntk.
  """
  warnings.warn('This function is an early proof-of-concept.')

  # Turn `f` into a JAX-compatible `apply_fn(params, x)`.
  if isinstance(f, tf.Module):
    apply_fn, _ = get_apply_fn_and_params(f)
  elif isinstance(f, tf.types.experimental.GenericFunction):
    apply_fn = tf2jax.convert_functional(f, *f.input_signature)
  else:
    raise NotImplementedError(f'Got `f={f}` of unsupported type {type(f)}, '
                              f'please file a bug at '
                              f'https://github.com/google/neural-tangents.')

  ntk_fn_jax = empirical_ntk_fn(
      apply_fn,
      trace_axes=trace_axes,
      diagonal_axes=diagonal_axes,
      vmap_axes=vmap_axes,
      implementation=implementation,
      _j_rules=_j_rules,
      _s_rules=_s_rules,
      _fwd=_fwd,
  )
  # Round-trip the JAX kernel function back to TF and compile with XLA.
  return tf.function(jax2tf.convert(ntk_fn_jax), jit_compile=True,
                     autograph=False)
def get_apply_fn_and_params(f: tf.Module):
  """Split a :class:`tf.Module` into a functional `apply_fn` and its `params`.

  The returned `params` can be passed to the Tensorflow empirical NTK
  kernel function together with `apply_fn`.

  .. warning::
    Trainable and non-trainable parameters are not distinguished here.

  Args:
    f:
      the :class:`tf.Module` to convert into an `apply_fn(params, x)`
      function. It must be callable (or be a :class:`tf.keras.Model`) and
      must carry an `input_shape` attribute describing the shape of `x`.

  Returns:
    A tuple of `(apply_fn, params)`, where `params` is a `PyTree[tf.Tensor]`.
  """
  if not hasattr(f, 'input_shape'):
    raise NotImplementedError(f'`f={f}` must have `input_shape` set. '
                              f'Please file a bug at '
                              f'https://github.com/google/neural-tangents.')

  @tf.function
  def _forward(x: PyTree) -> PyTree:
    # Keras models are invoked through `call` with training disabled.
    if isinstance(f, tf.keras.Model):
      return f.call(x, training=False)
    if not hasattr(f, '__call__'):
      raise NotImplementedError(f'Got `f={f}` of type {type(f)}, '
                                f'that is not callable. Please file a bug at '
                                f'https://github.com/google/neural-tangents.')
    return f(x)

  jax_fn, params = tf2jax.convert(_forward, tf.TensorSpec(f.input_shape))

  def apply_fn(params: PyTree, x: PyTree) -> PyTree:
    # `tf2jax` also returns the (unchanged) parameters; keep only outputs.
    outputs, _ = jax_fn(params, x)
    return outputs

  return apply_fn, params
| 13,523 | 43.633663 | 168 | py |
CS-Unet | CS-Unet-main/test.py | import argparse
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.dataset_synapse import Synapse_dataset
from datasets.dataset_ACDC import ACDCdataset
from utils.utils import test_single_volume
from networks.vision_transformer import CS_Unet as ViT_seg
from trainer import trainer_synapse
from config import get_config
# ---------------------------------------------------------------------------
# CLI for test-time evaluation. Defaults target the Synapse multi-organ
# dataset; `--cfg` (required) selects the yaml network config, and all
# parsed args are handed to `get_config` below.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--volume_path', type=str,
                    default='../data/Synapse', help='root dir for validation volume data') # for acdc volume_path=root_dir
parser.add_argument('--dataset', type=str,
                    default='Synapse', help='experiment_name')
parser.add_argument('--num_classes', type=int,
                    default=9, help='output channel of network')
parser.add_argument('--list_dir', type=str,
                    default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--output_dir', type=str,
                    default='./results', help='output dir')
parser.add_argument('--test_save_dir', default='./results/predictions', help='saving prediction as nii!')
parser.add_argument('--max_iterations', type=int,default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int, default=300, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.005, help='segmentation network learning rate')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
    "--opts",
    help="Modify config options by adding 'KEY VALUE' pairs. ",
    default=None,
    nargs='+',
)
# Flags below are forwarded to the shared Swin `get_config` machinery.
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                    help='no: no cache, '
                         'full: cache all data, '
                         'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
                    help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                    help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
# Synapse test volumes live in a fixed sub-folder of the dataset root.
if args.dataset == "Synapse":
    args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
config = get_config(args)
def inference(args, model, test_save_path=None):
    """Run whole-volume evaluation of `model` over the test split.

    Logs per-case and per-class mean Dice / HD95, and writes predictions
    (as nii) under `test_save_path` when it is not None.
    Returns the string "Testing Finished!".
    """
    dataset = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
    loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
    logging.info("{} test iterations per epoch".format(len(loader)))
    model.eval()
    metric_sum = 0.0
    for idx, batch in tqdm(enumerate(loader)):
        h, w = batch["image"].size()[2:]
        image = batch["image"]
        label = batch["label"]
        case = batch['case_name'][0]
        metrics = test_single_volume(image, label, model, classes=args.num_classes,
                                     patch_size=[args.img_size, args.img_size],
                                     test_save_path=test_save_path, case=case,
                                     z_spacing=args.z_spacing)
        metric_sum += np.array(metrics)
        case_mean = np.mean(metrics, axis=0)
        logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (idx, case, case_mean[0], case_mean[1]))
    # Average the per-class metrics over all test cases.
    metric_sum = metric_sum / len(dataset)
    for cls in range(1, args.num_classes):
        logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (cls, metric_sum[cls - 1][0], metric_sum[cls - 1][1]))
    overall = np.mean(metric_sum, axis=0)
    logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (overall[0], overall[1]))
    return "Testing Finished!"
if __name__ == "__main__":
    # cudnn determinism: `--deterministic 1` (default) trades speed for
    # reproducible kernels.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG source used downstream.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Per-dataset settings; overrides whatever was passed on the CLI.
    dataset_config = {
        'Synapse': {
            'Dataset': Synapse_dataset,
            'volume_path': args.volume_path,
            'list_dir': './lists/lists_Synapse',
            'num_classes': 9,
            'z_spacing': 1,
        },
        'ACDC': {
            'Dataset': ACDCdataset,
            'volume_path': args.volume_path,
            'list_dir': args.list_dir,
            'num_classes': 4,
            'z_spacing': 10,
        },
    }
    dataset_name = args.dataset
    args.num_classes = dataset_config[dataset_name]['num_classes']
    args.volume_path = dataset_config[dataset_name]['volume_path']
    args.Dataset = dataset_config[dataset_name]['Dataset']
    args.list_dir = dataset_config[dataset_name]['list_dir']
    args.z_spacing = dataset_config[dataset_name]['z_spacing']
    args.is_pretrain = True
    net = ViT_seg(config, img_size=args.img_size, num_classes=args.num_classes).cuda()
    # NOTE(review): checkpoint filename is hard-coded (contains a space), and
    # the fallback below never fires because the path does not contain the
    # substring 'best_model' -- confirm the intended fallback name.
    snapshot = os.path.join(args.output_dir, 'synapse_best 8108_epoch_187.pth')
    if not os.path.exists(snapshot): snapshot = snapshot.replace('best_model', 'epoch_'+str(args.max_epochs-1))
    msg = net.load_state_dict(torch.load(snapshot))
    print("self trained swin unet",msg)
    snapshot_name = snapshot.split('/')[-1]
    # NOTE(review): log folder says 'acdc' regardless of dataset -- verify.
    log_folder = './test_log/acdc_test_log_'
    os.makedirs(log_folder, exist_ok=True)
    logging.basicConfig(filename=log_folder + '/'+snapshot_name+".txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    logging.info(snapshot_name)
    if args.is_savenii:
        args.test_save_dir = os.path.join(args.output_dir, "predictions")
        test_save_path = args.test_save_dir
        os.makedirs(test_save_path, exist_ok=True)
        print(test_save_path)
    else:
        test_save_path = None
    inference(args, net, test_save_path)
| 7,236 | 46.300654 | 159 | py |
CS-Unet | CS-Unet-main/trainer_ACDC.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.modules.loss import CrossEntropyLoss
import torchvision
# import matplotlib.pyplot as plt
from utils.utils import DiceLoss
from torch.utils.data import DataLoader
from datasets.dataset_ACDC import ACDCdataset, RandomGenerator
from tqdm import tqdm
import os
from torchvision import transforms
from utils.test_ACDC import inference
from medpy.metric import dc,hd95
def create_lr_scheduler(optimizer,
                        num_step: int,
                        epochs: int,
                        warmup=True,
                        warmup_epochs=15,
                        warmup_factor=1e-3):
    """Build a LambdaLR schedule: linear warmup then polynomial decay.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        num_step: number of optimizer steps per epoch.
        epochs: total number of training epochs.
        warmup: when True, ramp the lr factor linearly from `warmup_factor`
            to 1 over the first ``epochs // 20`` epochs.
        warmup_epochs: kept for interface compatibility; the effective
            warmup length is ``epochs // 20`` (as in the historical code).
        warmup_factor: starting multiplier of the warmup ramp.

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR``.

    Note: PyTorch invokes ``lr_scheduler.step()`` once on construction, so
    ``f(0)`` determines the initial learning-rate factor.
    """
    assert num_step > 0 and epochs > 0
    # Effective warmup length. The original code recomputed this *inside*
    # the lambda, shadowing both the `warmup_epochs` argument and the
    # `warmup=False` reset -- which offset the decay schedule even when
    # warmup was disabled. Compute it once here instead.
    effective_warmup = int(epochs / 20) if warmup else 0

    def f(x):
        # Return the lr multiplier for global step `x`.
        if warmup and effective_warmup > 0 and x <= effective_warmup * num_step:
            # Warmup: multiplier ramps from warmup_factor -> 1.
            alpha = float(x) / (effective_warmup * num_step)
            return warmup_factor * (1 - alpha) + alpha
        # After warmup: multiplier decays from 1 -> 0 with power 0.9
        # (cf. deeplab_v2 learning-rate policy).
        return (1 - (x - effective_warmup * num_step) /
                ((epochs - effective_warmup) * num_step)) ** 0.9

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=f)
def trainer_acdc(args, model, snapshot_path):
    """Train `model` on the ACDC dataset.

    Trains with AdamW and a warmup/poly-decay schedule; validates every
    `args.n_skip` epochs once past epoch 50, checkpoints when the
    validation Dice improves (or exceeds 0.83), and runs a final test at
    the last epoch. Checkpoints and `log.txt` go under `snapshot_path`.
    """
    # if args.usecheckpoint:
    #     model.load_state_dict(torch.load(args.checkpoint))
    train_dataset = ACDCdataset(args.root_path, args.list_dir, split="train", transform=
                                   transforms.Compose(
                                   [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    Train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    db_val=ACDCdataset(base_dir=args.root_path, list_dir=args.list_dir, split="valid")
    valloader=DataLoader(db_val, batch_size=1, shuffle=False)
    db_test =ACDCdataset(base_dir=args.volume_path,list_dir=args.list_dir, split="test")
    testloader = DataLoader(db_test, batch_size=1, shuffle=False)
    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    model.train()
    ce_loss = CrossEntropyLoss()
    dice_loss = DiceLoss(args.num_classes)
    save_interval = args.n_skip # int(max_epoch/6)
    iter_num = 0
    Loss = []
    Test_Accuracy = []
    # Only checkpoints beating this validation Dice are saved.
    Best_dcs = 0.8
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    max_iterations = args.max_epochs * len(Train_loader)
    base_lr = args.base_lr
    base_weight = args.base_weight
    optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=base_weight)
    # optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.0001)
    # optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    # Build the lr schedule; it is stepped once per iteration (not per epoch).
    lr_scheduler = create_lr_scheduler(optimizer, len(Train_loader), args.max_epochs, warmup=True)
    for epoch in range(args.max_epochs):
        model.train()
        train_loss = 0
        with tqdm(desc='Epoch %d - train' % (epoch),
                  unit='it', total=len(Train_loader)) as pbar:
            for i_batch, sampled_batch in enumerate(Train_loader):
                image_batch, label_batch = sampled_batch["image"], sampled_batch["label"]
                image_batch, label_batch = image_batch.type(torch.FloatTensor), label_batch.type(torch.FloatTensor)
                image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
                outputs = model(image_batch)
                # Equal-weight combination of cross-entropy and Dice loss.
                loss_ce = ce_loss(outputs, label_batch[:].long())
                loss_dice = dice_loss(outputs, label_batch[:], softmax=True)
                loss = loss_dice * 0.5+ loss_ce * 0.5
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr_scheduler.step()
                lr_ = optimizer.param_groups[0]["lr"]
                # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
                # for param_group in optimizer.param_groups:
                #     param_group['lr'] = lr_
                iter_num = iter_num + 1
                train_loss += loss.item()
                # logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item()))
                pbar.set_postfix(loss=train_loss / (i_batch + 1), lr=lr_)
                pbar.update()
        # Loss.append(train_loss/len(train_dataset))
        # loss visualization
        # fig1, ax1 = plt.subplots(figsize=(11, 8))
        # ax1.plot(range(epoch + 1), Loss)
        # ax1.set_title("Average trainset loss vs epochs")
        # ax1.set_xlabel("Epoch")
        # ax1.set_ylabel("Current loss")
        # plt.savefig('loss_vs_epochs_gauss.png')
        # plt.clf()
        # plt.close()
        # ---------- Validation ----------
        if epoch > 50 and (epoch + 1) % save_interval == 0:
            dc_sum = 0
            model.eval()
            for i, val_sampled_batch in enumerate(valloader):
                val_image_batch, val_label_batch = val_sampled_batch["image"], val_sampled_batch["label"]
                val_image_batch, val_label_batch = val_image_batch.type(torch.FloatTensor), val_label_batch.type(
                    torch.FloatTensor)
                val_image_batch, val_label_batch = val_image_batch.cuda().unsqueeze(
                    1), val_label_batch.cuda().unsqueeze(1)
                val_outputs = model(val_image_batch)
                val_outputs = torch.argmax(torch.softmax(val_outputs, dim=1), dim=1).squeeze(0)
                dc_sum += dc(val_outputs.cpu().data.numpy(), val_label_batch[:].cpu().data.numpy())
            avg_dcs = dc_sum / len(valloader)
            logging.info("Validation ===>avg_dsc: %f" % avg_dcs)
            if avg_dcs > Best_dcs:
                save_mode_path = os.path.join(snapshot_path, 'epoch={}_avg_dcs={}.pth'.format(epoch, avg_dcs))
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
                Best_dcs = avg_dcs
                # ---------- Test ----------
                avg_dcs, avg_hd = inference(args, model, testloader, args.test_save_dir) #args.test_save_dir
                Test_Accuracy.append(avg_dcs)
            elif avg_dcs > 0.83:
                # ---------- Test ----------
                avg_dcs, avg_hd = inference(args, model, testloader, None)
                Test_Accuracy.append(avg_dcs)
                save_mode_path = os.path.join(snapshot_path, 'test_epoch={}_avg_dcs={}.pth'.format(epoch, avg_dcs))
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
        # val visualization
        # fig2, ax2 = plt.subplots(figsize=(11, 8))
        # ax2.plot(range(int((epoch + 1) // save_interval)), Test_Accuracy)
        # ax2.set_title("Average val dataset dice score vs epochs")
        # ax2.set_xlabel("Epoch")
        # ax2.set_ylabel("Current dice score")
        # plt.savefig('val_dsc_vs_epochs_gauss.png')
        # plt.clf()
        # plt.close()
        if epoch >= args.max_epochs - 1:
            save_mode_path = os.path.join(snapshot_path, 'epoch={}_lr={}.pth'.format(epoch, lr_))
            torch.save(model.state_dict(), save_mode_path)
            logging.info("save model to {}".format(save_mode_path))
            # ---------- Test ----------
            avg_dcs, avg_hd = inference(args, model, testloader, None)
            Test_Accuracy.append(avg_dcs)
            print(max(Test_Accuracy))
            break
    return "Training Finished!" | 8,164 | 42.663102 | 132 | py |
CS-Unet | CS-Unet-main/metrics.py | import torch
import numpy as np
from hausdorff import hausdorff_distance
from medpy.metric.binary import hd, dc
def dice(pred, target):
    """Soft Dice overlap between two mask tensors; returns a Python float."""
    eps = 0.00001
    pred_vec = pred.contiguous().view(1, -1)
    target_vec = target.contiguous().view(1, -1)
    # 2 * |A ∩ B| / (|A| + |B|), smoothed to avoid division by zero.
    overlap = (pred_vec * target_vec).sum().item()
    denom = pred_vec.sum().item() + target_vec.sum().item() + eps
    return (2 * overlap + eps) / denom
def dice3D(img_gt, img_pred, voxel_size):
    """Per-structure Dice between two 3-D label maps.

    Scores are computed for labels in the fixed order [3, 1, 2].
    `voxel_size` is accepted for interface compatibility but is unused
    here (the volume computation is commented out upstream).

    Parameters
    ----------
    img_gt: np.array
        Ground-truth segmentation map.
    img_pred: np.array
        Predicted segmentation map.
    voxel_size: list, tuple or np.array
        Voxel size (unused).

    Returns
    -------
    A list with one Dice score per structure.

    Raises
    ------
    ValueError: if the two arrays differ in rank.
    """
    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(img_gt.ndim,
                                                                img_pred.ndim))
    scores = []
    for label in [3, 1, 2]:
        # Binary (0/1) mask of the current structure in each volume.
        gt_mask = np.clip(np.where(img_gt == label, img_gt, 0), 0, 1)
        pred_mask = np.clip(np.where(img_pred == label, img_pred, 0), 0, 1)
        scores += [dc(gt_mask, pred_mask)]
    return scores
def hd_3D(img_pred, img_gt, labels=[3, 1, 2]):
    """Per-structure Hausdorff distance between two 3-D label maps.

    Returns 0 for any structure that is empty in either volume (the
    distance is undefined there). The mutable default `labels` is never
    modified, so sharing it across calls is safe.
    """
    distances = []
    for label in labels:
        # Binary (0/1) mask of the current structure in each volume.
        gt_mask = np.clip(np.where(img_gt == label, img_gt, 0), 0, 1)
        pred_mask = np.clip(np.where(img_pred == label, img_pred, 0), 0, 1)
        if np.sum(pred_mask) == 0 or np.sum(gt_mask) == 0:
            distances.append(0)
        else:
            distances.append(hd(pred_mask, gt_mask))
    return distances
def cal_hausdorff_distance(pred, target):
    """Euclidean Hausdorff distance between two tensors (via `hausdorff`)."""
    pred_np = np.array(pred.contiguous())
    target_np = np.array(target.contiguous())
    return hausdorff_distance(pred_np, target_np, distance="euclidean")
def make_one_hot(input, num_classes):
    """Convert a class-index tensor of shape [N, 1, *] into a one-hot tensor.

    Args:
        input: tensor of shape [N, 1, *] holding integer class indices.
        num_classes: number of classes C in the encoding.

    Returns:
        A float tensor of shape [N, C, *] with 1 at each index position.
    """
    out_shape = list(input.shape)
    out_shape[1] = num_classes
    # Scatter a 1 along the class dimension at every index position.
    one_hot = torch.zeros(tuple(out_shape))
    return one_hot.scatter_(1, input.cpu().long(), 1)
def match_pred_gt(pred, gt):
    """Renumber predicted labels so they best match the ground-truth labels.

    pred: (1, C, H, W) label map; gt: (1, C, H, W) label map. The smallest
    unique value of each map (presumably background -- TODO confirm) is
    dropped by the `[1:]` slices. Every remaining pred label is scored
    against every gt label with Dice; the best pred label per gt label is
    selected and its region renumbered 1..n_gt in a fresh zero tensor.
    Returns all zeros if either map has no labels left after the drop.
    """
    gt_labels = torch.unique(gt, sorted=True)[1:]
    pred_labels = torch.unique(pred, sorted=True)[1:]
    if len(gt_labels) != 0 and len(pred_labels) != 0:
        # Dice overlap for every (pred label, gt label) pair.
        dice_Matrix = torch.zeros((len(pred_labels), len(gt_labels)))
        for i, pl in enumerate(pred_labels):
            pred_i = torch.tensor(pred==pl, dtype=torch.float)
            for j, gl in enumerate(gt_labels):
                dice_Matrix[i, j] = dice(make_one_hot(pred_i, 2)[0], make_one_hot(gt==gl, 2)[0])
        # max_axis0 = np.max(dice_Matrix, axis=0)
        # Index of the best-overlapping pred label for each gt label.
        max_arg0 = np.argmax(dice_Matrix, axis=0)
    else:
        return torch.zeros_like(pred)
    pred_match = torch.zeros_like(pred)
    for i, arg in enumerate(max_arg0):
        pred_match[pred==pred_labels[arg]] = i + 1
    return pred_match
if __name__ == "__main__":
    # Smoke-test entry point: load a cached (prediction, ground-truth) pair.
    npy_path = "/home/fcheng/Cardia/source_code/logs/logs_df_50000/eval_pp_test/200.npy"
    # Fix: the original passed the undefined name `npy_p` (NameError).
    pred_df, gt_df = np.load(npy_path)
| 4,492 | 29.773973 | 120 | py |
CS-Unet | CS-Unet-main/train.py | import argparse
import logging
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from networks.vision_transformer import CS_Unet as ViT_seg
from trainer import trainer_synapse
from trainer_ACDC import trainer_acdc
from config import get_config
# ---------------------------------------------------------------------------
# Training CLI. Defaults target the Synapse multi-organ dataset; `--cfg`
# (required) selects the yaml network config, and all parsed args are
# handed to `get_config` below.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/Synapse', help='root dir for data') # '../data/processed_acdc_dataset'
parser.add_argument('--volume_path', type=str, # ../data/processed_acdc_dataset
                    default='../data/Synapse', help='test dir for data')
parser.add_argument('--dataset', type=str,
                    default='Synapse', help='experiment_name')
parser.add_argument('--list_dir', type=str,
                    default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--num_classes', type=int,
                    default=9, help='output channel of network')
parser.add_argument('--output_dir', type=str,
                    default='./results', help='output dir')
parser.add_argument('--test_save_dir', default='./results/acdc/predictions', help='saving prediction as nii!')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int,
                    default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int,
                    default=24, help='batch_size per gpu')
parser.add_argument("--n_skip", type=int,default=5)
parser.add_argument('--n_gpu', type=int, default=1, help='total gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--base_weight', type=float, default=0.0005,
                    help='segmentation network learning rate')
parser.add_argument('--img_size', type=int,
                    default=224, help='input patch size of network input')
parser.add_argument('--seed', type=int,
                    default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
    "--opts",
    help="Modify config options by adding 'KEY VALUE' pairs. ",
    default=None,
    nargs='+',
)
# Flags below are forwarded to the shared Swin `get_config` machinery.
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                    help='no: no cache, '
                         'full: cache all data, '
                         'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
                    help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                    help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
# Synapse training slices / test volumes live in fixed sub-folders.
if args.dataset == "Synapse":
    args.root_path = os.path.join(args.root_path, "train_npz")
    args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
config = get_config(args)
if __name__ == "__main__":
    # cudnn determinism: `--deterministic 1` (default) trades speed for
    # reproducible kernels.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG source used downstream.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    dataset_name = args.dataset
    # Per-dataset settings; overrides whatever was passed on the CLI.
    dataset_config = {
        'Synapse': {
            'root_path': args.root_path,
            'volume_path': args.volume_path,
            'list_dir': './lists/lists_Synapse',
            'num_classes': 9,
            'z_spacing': 1,
        },
        'ACDC': {
            'root_path': args.root_path,
            'volume_path': args.volume_path,
            'list_dir': args.list_dir,
            'num_classes': 4,
            'z_spacing': 10,
        },
    }
    # if args.batch_size != 24 and args.batch_size % 6 == 0:
    #     args.base_lr *= args.batch_size / 24
    args.num_classes = dataset_config[dataset_name]['num_classes']
    args.root_path = dataset_config[dataset_name]['root_path']
    args.list_dir = dataset_config[dataset_name]['list_dir']
    args.volume_path = dataset_config[dataset_name]['volume_path']
    args.z_spacing = dataset_config[dataset_name]['z_spacing']
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    net = ViT_seg(config, img_size=args.img_size, num_classes=args.num_classes).cuda()
    # net.load_from(config)
    # Dispatch to the dataset-specific trainer; any dataset other than
    # 'Synapse' must be 'ACDC' or the dict lookup below raises KeyError.
    if args.dataset == "Synapse":
        trainer = {'Synapse': trainer_synapse,}
        trainer[dataset_name](args, net, args.output_dir)
    else:
        trainer_ACDC = {'ACDC': trainer_acdc,}
        trainer_ACDC[dataset_name](args, net, args.output_dir)
| 5,517 | 43.144 | 110 | py |
CS-Unet | CS-Unet-main/trainer.py | import argparse
import logging
import os
import random
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
from utils.utils import DiceLoss
from torchvision import transforms
from utils.utils import test_single_volume
from metrics import dice, cal_hausdorff_distance
def get_mean_std(loader):
    """Per-channel mean and std over every batch yielded by `loader`.

    Each element of `loader` must be a dict with an NCHW tensor under
    'image'. Uses var[X] = E[X^2] - E[X]^2, averaging over the batch and
    spatial dimensions. Returns (mean, std, num_batches).
    """
    running_mean = 0
    running_sq = 0
    batches = 0
    for batch in loader:
        images = batch['image']
        running_mean = running_mean + torch.mean(images, dim=[0, 2, 3])
        running_sq = running_sq + torch.mean(images ** 2, dim=[0, 2, 3])
        batches += 1
    mean = running_mean / batches
    std = (running_sq / batches - mean ** 2) ** 0.5
    return mean, std, batches
def create_lr_scheduler(optimizer,
                        num_step: int,
                        epochs: int,
                        warmup=True,
                        warmup_epochs=15,
                        warmup_factor=1e-3):
    """Build a LambdaLR schedule: linear warmup then polynomial decay.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        num_step: number of optimizer steps per epoch.
        epochs: total number of training epochs.
        warmup: when True, ramp the lr factor linearly from `warmup_factor`
            to 1 over the first ``epochs // 20`` epochs.
        warmup_epochs: kept for interface compatibility; the effective
            warmup length is ``epochs // 20`` (as in the historical code).
        warmup_factor: starting multiplier of the warmup ramp.

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR``.

    Note: PyTorch invokes ``lr_scheduler.step()`` once on construction, so
    ``f(0)`` determines the initial learning-rate factor.
    """
    assert num_step > 0 and epochs > 0
    # Effective warmup length. The original code recomputed this *inside*
    # the lambda, shadowing both the `warmup_epochs` argument and the
    # `warmup=False` reset -- which offset the decay schedule even when
    # warmup was disabled. Compute it once here instead.
    effective_warmup = int(epochs / 20) if warmup else 0

    def f(x):
        # Return the lr multiplier for global step `x`.
        if warmup and effective_warmup > 0 and x <= effective_warmup * num_step:
            # Warmup: multiplier ramps from warmup_factor -> 1.
            alpha = float(x) / (effective_warmup * num_step)
            return warmup_factor * (1 - alpha) + alpha
        # After warmup: multiplier decays from 1 -> 0 with power 0.9
        # (cf. deeplab_v2 learning-rate policy).
        return (1 - (x - effective_warmup * num_step) /
                ((epochs - effective_warmup) * num_step)) ** 0.9

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=f)
def trainer_synapse(args, model, snapshot_path):
    """Train `model` on the Synapse dataset and periodically evaluate it.

    Trains with AdamW, a warmup/poly-decay schedule and a 0.4*CE + 0.6*Dice
    loss; writes scalars and sample images to TensorBoard. Past epoch 128,
    full test volumes are evaluated every 4 epochs and the best-Dice
    checkpoint is written to `snapshot_path`.
    """
    from datasets.dataset_synapse import Synapse_dataset, RandomGenerator
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size * args.n_gpu
    # max_iterations = args.max_iterations
    # ---------- construct dataset ----------
    db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
                               transform=transforms.Compose(
                                   [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    print("The length of train set is: {}".format(len(db_train)))
    db_test = Synapse_dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
    testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
    print("The test iterations per epoch is: {}".format(len(testloader)))
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)
    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
                             worker_init_fn=worker_init_fn)
    # print(get_mean_std(trainloader))
    # quit()
    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    # ---------- training ----------
    ce_loss = CrossEntropyLoss()
    dice_loss = DiceLoss(num_classes)
    # optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=5E-3)
    # Build the lr schedule; it is stepped once per iteration (not per epoch).
    lr_scheduler = create_lr_scheduler(optimizer, len(trainloader), args.max_epochs, warmup=True)
    writer = SummaryWriter(snapshot_path + '/log')
    iter_num = 0
    max_epoch = args.max_epochs
    max_iterations = args.max_epochs * len(trainloader)  # max_epoch = max_iterations // len(trainloader) + 1
    logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
    #iterator = tqdm(, ncols=70)
    # save best model
    best_performance = 0.0
    for epoch_num in range(max_epoch):
        total_loss = 0
        model.train()
        with tqdm(desc='Epoch %d - train' % (epoch_num),
                  unit='it', total=len(trainloader)) as pbar:
            for i_batch, sampled_batch in enumerate(trainloader):
                image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
                image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
                outputs = model(image_batch)
                # Weighted combination of cross-entropy and Dice loss.
                loss_ce = ce_loss(outputs, label_batch[:].long())
                loss_dice = dice_loss(outputs, label_batch, softmax=True)
                loss = 0.4 * loss_ce + 0.6 * loss_dice
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr_scheduler.step()
                lr_ = optimizer.param_groups[0]["lr"]
                # lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
                # for param_group in optimizer.param_groups:
                #     param_group['lr'] = lr_
                total_loss += loss.item()
                iter_num = iter_num + 1
                writer.add_scalar('info/lr', lr_, iter_num)
                writer.add_scalar('info/total_loss', loss, iter_num)
                writer.add_scalar('info/loss_ce', loss_ce, iter_num)
                #logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item()))
                pbar.set_postfix(loss=total_loss / (i_batch + 1), lr=lr_)
                pbar.update()
                # Dump a sample image / prediction / label every 20 iterations.
                if iter_num % 20 == 0:
                    image = image_batch[1, 0:1, :, :]
                    image = (image - image.min()) / (image.max() - image.min())
                    writer.add_image('train/Image', image, iter_num)
                    outputs = torch.argmax(torch.softmax(outputs, dim=1), dim=1, keepdim=True)
                    writer.add_image('train/Prediction', outputs[1, ...] * 50, iter_num)
                    labs = label_batch[1, ...].unsqueeze(0) * 50
                    writer.add_image('train/GroundTruth', labs, iter_num)
        # print(lr_)
        # ---------- Validation ----------
        # if (epoch_num > 10) and (epoch_num + 1) % 5 == 0:
        # if (epoch_num > 158 and (epoch_num + 1) % 10 == 0) or (epoch_num >= max_epoch - 1):
        if epoch_num > 128 and (epoch_num + 1) % 4 == 0:
        # if (epoch_num > 138 and (epoch_num + 1) % 10 == 0) or (epoch_num > 198 and (epoch_num + 1) % 5 == 0):
            model.eval()
            with torch.no_grad():
                metric_list = 0.0
                for j_batch, sample in enumerate(testloader):
                    h, w = sample["image"].size()[2:]
                    image, label, case_name = sample["image"], sample["label"], sample['case_name'][0]
                    metric_i = test_single_volume(image, label, model, classes=args.num_classes,
                                                  patch_size=[args.img_size, args.img_size],
                                                  test_save_path=None, case=case_name, z_spacing=args.z_spacing)
                    metric_list += np.array(metric_i)
                    # logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (
                    #     i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1]))
                metric_list = metric_list / len(db_test)
                for i in range(1, args.num_classes):
                    logging.info(
                        'Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i - 1][0], metric_list[i - 1][1]))
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                logging.info(
                    'valid performance: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))
                # Keep only the best-Dice checkpoint.
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path, 'best_epoch_' + str(epoch_num) + '.pth')
                    torch.save(model.state_dict(), save_mode_path)
        # ---------- save results ----------
        # save_interval = 10  # int(max_epoch/6)
        # if epoch_num > int(max_epoch / 2 +20) and (epoch_num + 1) % save_interval == 0:
        #     save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
        #     torch.save(model.state_dict(), save_mode_path)
        #     logging.info("save model to {}".format(save_mode_path))
        #
        if epoch_num >= max_epoch - 1:
            print(best_performance)
            # save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
            # torch.save(model.state_dict(), save_mode_path)
            # logging.info("save model to {}".format(save_mode_path))
            # #iterator.close()
            # break
    writer.close()
    return "Training Finished!"
| 9,360 | 45.572139 | 118 | py |
CS-Unet | CS-Unet-main/networks/vision_transformer.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from .conv_swin_transformer_unet_skip_expand_decoder_sys import ConvSwinTransformerSys
logger = logging.getLogger(__name__)
class CS_Unet(nn.Module):
    """Segmentation wrapper around `ConvSwinTransformerSys` (the CS-Unet body).

    Grayscale inputs (1 channel) are replicated to 3 channels before being
    fed to the backbone. The interface mirrors the original Swin-Unet
    wrapper so existing training / test scripts keep working (`vis` is
    accepted but unused, as before).
    """

    def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
        super(CS_Unet, self).__init__()
        self.num_classes = num_classes
        self.zero_head = zero_head
        self.config = config
        # Build the encoder-decoder backbone from the yacs-style config.
        self.CS_Unet = ConvSwinTransformerSys(img_size=config.DATA.IMG_SIZE,
                                              patch_size=config.MODEL.SWIN.PATCH_SIZE,
                                              in_chans=config.MODEL.SWIN.IN_CHANS,
                                              num_classes=self.num_classes,
                                              embed_dim=config.MODEL.SWIN.EMBED_DIM,
                                              depths=config.MODEL.SWIN.DEPTHS,
                                              num_heads=config.MODEL.SWIN.NUM_HEADS,
                                              window_size=config.MODEL.SWIN.WINDOW_SIZE,
                                              mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
                                              qkv_bias=config.MODEL.SWIN.QKV_BIAS,
                                              qk_scale=config.MODEL.SWIN.QK_SCALE,
                                              drop_rate=config.MODEL.DROP_RATE,
                                              drop_path_rate=config.MODEL.DROP_PATH_RATE,
                                              ape=config.MODEL.SWIN.APE,
                                              patch_norm=config.MODEL.SWIN.PATCH_NORM,
                                              use_checkpoint=config.TRAIN.USE_CHECKPOINT)

    def forward(self, x):
        """Forward pass; single-channel input is repeated to 3 channels."""
        if x.size()[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        logits = self.CS_Unet(x)
        return logits

    def load_from(self, config):
        """Load pretrained weights from `config.MODEL.PRETRAIN_CKPT`.

        Two checkpoint layouts are handled: a raw state dict (keys are
        remapped by stripping a 17-character prefix and 'output' heads are
        dropped), and a `{'model': state_dict}` checkpoint whose encoder
        `layers.*` entries are additionally mirrored into the decoder
        (`layers_up.*`) when shapes match. Loading is non-strict.
        """
        pretrained_path = config.MODEL.PRETRAIN_CKPT
        if pretrained_path is None:
            print("none pretrain")
            return
        print("pretrained_path:{}".format(pretrained_path))
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        pretrained_dict = torch.load(pretrained_path, map_location=device)
        if "model" not in pretrained_dict:
            print("---start load pretrained modle by splitting---")
            # Strip the 17-char wrapper prefix from every key.
            pretrained_dict = {k[17:]: v for k, v in pretrained_dict.items()}
            # Fix: the original `print(k)` here raised NameError -- in
            # Python 3 the comprehension loop variable does not leak.
            for k in list(pretrained_dict.keys()):
                if "output" in k:
                    print("delete key:{}".format(k))
                    del pretrained_dict[k]
            msg = self.CS_Unet.load_state_dict(pretrained_dict, strict=False)
            print(msg)
            return
        pretrained_dict = pretrained_dict['model']
        print("---start load pretrained modle of swin encoder---")
        model_dict = self.CS_Unet.state_dict()
        full_dict = copy.deepcopy(pretrained_dict)
        # Mirror encoder layer i into decoder slot layers_up.(3 - i).
        for k, v in pretrained_dict.items():
            if "layers." in k:
                current_layer_num = 3 - int(k[7:8])
                current_k = "layers_up." + str(current_layer_num) + k[8:]
                full_dict.update({current_k: v})
        for k in list(full_dict.keys()):
            if k in model_dict:
                if full_dict[k].shape != model_dict[k].shape:
                    # Fix: report the shape of the entry being deleted --
                    # the original printed the stale `v` from the loop above.
                    print("delete:{};shape pretrain:{};shape model:{}".format(
                        k, full_dict[k].shape, model_dict[k].shape))
                    del full_dict[k]
        msg = self.CS_Unet.load_state_dict(full_dict, strict=False)
        # print(msg)
| 3,953 | 42.450549 | 113 | py |
CS-Unet | CS-Unet-main/networks/conv_swin_transformer_unet_skip_expand_decoder_sys.py | import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from einops.layers.torch import Rearrange
class Mlp(nn.Module):
    """ConvNeXt-style feed-forward block operating on (B, H, W, C) tensors.

    depthwise 7x7 conv -> LayerNorm -> 1x1 conv expansion (4x) -> GELU ->
    1x1 conv projection, with optional per-channel layer scale and stochastic
    depth, wrapped in a residual connection.
    """
    def __init__(self, dim, drop_path=0.2, layer_scale_init_value=0.7):
        super().__init__()
        # depthwise conv; kernel/padding pairs 7/3 (or 5/2, 3/1) keep spatial size
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = nn.LayerNorm(dim, eps=1e-6)
        # pointwise convs implement the MLP (equivalent to nn.Linear on channels)
        self.pwconv1 = nn.Conv2d(dim, 4 * dim, kernel_size=1)
        self.act = nn.GELU()
        self.pwconv2 = nn.Conv2d(4 * dim, dim, kernel_size=1)  # nn.Linear(4 * dim, dim)
        # learnable per-channel layer scale, disabled when init value <= 0
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                  requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
    def forward(self, x):
        input = x  # residual input, (B, H, W, C)
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W) for conv
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # channels-last for LayerNorm
        x = self.norm(x)
        x = x.permute(0, 3, 1, 2)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        if self.gamma is not None:
            x = self.gamma * x
        x = input + self.drop_path(x)  # residual connection, (N, H, W, C)
        return x
def window_partition(x, window_size):
    """Split a channels-last feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C); windows are
        ordered batch-major, then row-block, then column-block.
    """
    batch, height, width, channels = x.shape
    rows = height // window_size
    cols = width // window_size
    tiled = x.view(batch, rows, window_size, cols, window_size, channels)
    reordered = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return reordered.view(-1, window_size, window_size, channels)
def window_reverse(windows, window_size, H, W):
    """Inverse of window_partition: reassemble windows into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    # recover the batch size from the total window count
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid_h, grid_w = H // window_size, W // window_size
    stacked = windows.view(B, grid_h, grid_w, window_size, window_size, -1)
    restored = stacked.permute(0, 1, 3, 2, 4, 5).contiguous()
    return restored.view(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with
    depthwise-convolutional q/k/v projections.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): Kept for interface compatibility; the conv
            projections here are bias-free, so this flag is unused. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # depthwise 3x3 conv + LayerNorm replace the usual linear qkv projection
        self.conv_proj_q = self._build_projection(dim, kernel_size=3, stride=1, padding=1)
        self.conv_proj_k = self._build_projection(dim, kernel_size=3, stride=1, padding=1)
        self.conv_proj_v = self._build_projection(dim, kernel_size=3, stride=1, padding=1)
        self.attn_drop = nn.Dropout(attn_drop)
        # output projection: depthwise conv + GELU over the window layout
        self.proj = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1, bias=False, groups=dim), nn.GELU())
        self.proj_drop = nn.Dropout(proj_drop)
        self.softmax = nn.Softmax(dim=-1)
    def _build_projection(self, dim_in, kernel_size=3, stride=1, padding=1):
        # depthwise conv over the window, flattened back to tokens, then LayerNorm
        proj = nn.Sequential(
            nn.Conv2d(dim_in, dim_in, kernel_size, padding=padding, stride=stride, bias=False, groups=dim_in),
            Rearrange('b c h w -> b (h w) c'),
            nn.LayerNorm(dim_in))
        return proj
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        # [batch_size*num_windows, Mh*Mw, total_embed_dim]
        B_, N, C = x.shape
        Mh = int(N ** .5)  # windows are square, so N == Mh * Mh
        x = x.view(B_, Mh, Mh, C).permute(0, 3, 1, 2)  # [batch_size*num_windows, C, Mh, Mw]
        # when we use conv the shape should be B, C, H, W. so use permute
        q = self.conv_proj_q(x).reshape(B_, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)  # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
        k = self.conv_proj_k(x).reshape(B_, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        v = self.conv_proj_v(x).reshape(B_, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]
        # @:multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]
        if mask is not None:
            # mask: [nW, Mh*Mw, Mh*Mw]
            nW = mask.shape[0]  # num_windows
            # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
            # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        # transpose(2, 3) swaps token and head-dim axes before reshaping back
        # to the (B_, C, Mh, Mh) layout expected by the conv output projection
        x = (attn @ v).transpose(2, 3).reshape(B_, C, Mh, Mh)
        x = self.proj(x)
        x = x.reshape(B_, C, N).transpose(1, 2)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
class ConvSwinTransformerBlock(nn.Module):
    r""" Conv Swin Transformer Block: (shifted-)window conv-attention followed
    by a ConvNeXt-style Mlp, each wrapped in a residual connection.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        # NOTE: Mlp is the ConvNeXt-style block above; `drop` is forwarded as its
        # drop_path probability and the expansion ratio is fixed at 4 inside Mlp.
        self.mlp = Mlp(dim=dim, drop_path=drop)
        if self.shift_size > 0:
            # calculate attention mask for SW-CMSA: positions wrapped together by
            # the cyclic shift but belonging to different regions get -100 so the
            # softmax effectively zeroes their attention.
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        x = shortcut + self.drop_path(x)
        # FFN: the ConvNeXt Mlp works on the (B, H, W, C) layout and adds its own residual
        x = x.view(B, H, W, C)
        x = self.mlp(x)
        x = x.view(B, H * W, C)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
class PatchMerging(nn.Module):
    r""" Patch Merging Layer: 2x spatial downsampling with channel doubling.
    Each 2x2 neighbourhood of patches is concatenated channel-wise (4C),
    normalised, then linearly reduced to 2C.
    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H/2*W/2, 2*C)"""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # gather the four interleaved sub-grids of every 2x2 neighbourhood
        offsets = ((0, 0), (1, 0), (0, 1), (1, 1))
        quadrants = [grid[:, r::2, c::2, :] for r, c in offsets]
        merged = torch.cat(quadrants, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))
class PatchExpand(nn.Module):
    """2x spatial upsampling via transposed convolution, halving the channels.
    Token-sequence interface: (B, H*W, C) -> (B, 2H*2W, C/2).
    """
    def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.up = nn.Sequential(nn.ConvTranspose2d(dim, dim // dim_scale, kernel_size=2, stride=2), nn.GELU())
        self.norm = norm_layer(dim)
        self.drop = nn.Dropout(p=0.2)
    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        normed = self.norm(x.view(B, H, W, C))
        spatial = normed.permute(0, 3, 1, 2)  # channels-first for the conv
        upsampled = self.drop(self.up(spatial))
        # back to channels-last tokens with half the channels
        return upsampled.permute(0, 2, 3, 1).contiguous().view(B, -1, C // 2)
class FinalPatchExpand_X4(nn.Module):
    """4x spatial upsampling: expand channels 16x with a linear layer, then
    trade the extra channels for a 4x4 spatial grid (pixel-shuffle style).
    """
    def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.dim_scale = dim_scale
        # 16x channel expansion; rearrange in forward() converts it to 4x4 upsampling
        self.expand = nn.Linear(dim, 16 * dim, bias=False)
        self.output_dim = dim
        self.norm = norm_layer(self.output_dim)
    def forward(self, x):
        """
        x: B, H*W, C -> B, (H*4)*(W*4), C
        """
        H, W = self.input_resolution
        x = self.expand(x)
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        x = x.view(B, H, W, C)
        # redistribute the expanded channels into a dim_scale x dim_scale spatial grid
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale,
                      c=C // (self.dim_scale ** 2))
        x = x.view(B, -1, self.output_dim)
        x = self.norm(x)
        return x
class BasicLayer(nn.Module):
    """ A basic convolutional Swin Transformer layer for one encoder stage:
    a stack of ConvSwinTransformerBlocks followed by optional downsampling.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even blocks use W-MSA (shift 0), odd blocks use SW-MSA
        self.blocks = nn.ModuleList([
            ConvSwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                     num_heads=num_heads, window_size=window_size,
                                     shift_size=0 if (i % 2 == 0) else window_size // 2,
                                     mlp_ratio=mlp_ratio,
                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
                                     drop=drop, attn_drop=attn_drop,
                                     drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                     norm_layer=norm_layer)
            for i in range(depth)])
        # patch merging layer (downsampling), applied after the blocks
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # gradient checkpointing trades compute for memory during training
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class BasicLayer_up(nn.Module):
    """ A basic Convolutional Swin Transformer layer for one decoder stage:
    a stack of ConvSwinTransformerBlocks followed by optional upsampling.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        upsample (nn.Module | None, optional): Upsample layer at the end of the layer.
            NOTE(review): any non-None value causes a PatchExpand to be built;
            the class actually passed in is ignored.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: alternate W-MSA (shift 0) and SW-MSA (shifted) blocks
        self.blocks = nn.ModuleList([
            ConvSwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                     num_heads=num_heads, window_size=window_size,
                                     shift_size=0 if (i % 2 == 0) else window_size // 2,
                                     mlp_ratio=mlp_ratio,
                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
                                     drop=drop, attn_drop=attn_drop,
                                     drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                     norm_layer=norm_layer)
            for i in range(depth)])
        # patch expanding layer (upsampling), applied after the blocks
        if upsample is not None:
            self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)
        else:
            self.upsample = None
    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                # gradient checkpointing trades compute for memory during training
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.upsample is not None:
            x = self.upsample(x)
        return x
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding via a small convolutional stem.
    Two stride-2 3x3 convolutions give the overall 4x downsampling implied by
    ``patch_size``; patches therefore overlap (unlike a plain strided conv embed).
    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
            NOTE(review): the conv stem below calls ``norm_layer(embed_dim // 2)``
            unconditionally, so in practice a non-None norm_layer is required.
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # conv stem: two stride-2 stages -> 4x spatial reduction, embed_dim channels
        self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim // 2, kernel_size=3, stride=1, padding=1), nn.GELU(),
                                  nn.Conv2d(embed_dim // 2, embed_dim // 2, kernel_size=3, stride=2, padding=1),
                                  nn.GELU(),
                                  Rearrange('b c h w -> b h w c'),
                                  norm_layer(embed_dim // 2),
                                  Rearrange('b h w c -> b c h w'),
                                  nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), nn.GELU(),
                                  nn.Conv2d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1), nn.GELU())
        if norm_layer is not None:
            # NOTE(review): self.norm is built over in_chans but never applied;
            # forward() tests ``self.norm`` and then applies ``self.norm2``.
            self.norm = norm_layer(in_chans)
            self.norm2 = norm_layer(embed_dim)
        else:
            self.norm = None
        self.drop = nn.Dropout(p=0.2)
    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        x = self.drop(x).flatten(2).transpose(1, 2)  # (B, num_patches, embed_dim)
        if self.norm is not None:
            x = self.norm2(x)
        return x
class ConvSwinTransformerSys(nn.Module):
    """Convolutional Swin Transformer encoder-decoder (U-shaped) backbone.

    Encoder: conv patch embedding followed by ``len(depths)`` BasicLayer stages
    with PatchMerging downsampling. Decoder: symmetric BasicLayer_up stages
    with PatchExpand upsampling; each skip feature is concatenated and fused by
    a small conv block (``concat_back_dim``). A final 4x patch expansion
    restores full resolution before the 1x1 segmentation head.
    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        depths_decoder (tuple(int)): Decoder depths (only used in the startup log).
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        final_upsample (str): Final upsampling mode; only "expand_first" is implemented.
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 3, 3, 3],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, final_upsample="expand_first", **kwargs):
        super().__init__()
        print(
            "ConvSwinTransformerSys expand initial----depths:{};depths_decoder:{};num_heads=:{};drop_path_rate:{};num_classes:{}".format(
                depths,
                depths_decoder, num_heads, drop_path_rate, num_classes))
        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))  # bottleneck dim
        self.num_features_up = int(embed_dim * 2)
        self.mlp_ratio = mlp_ratio
        self.final_upsample = final_upsample
        # split image into overlapping patches via the conv stem
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # absolute position embedding (optional)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: rate increases linearly across all encoder blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        # build encoder and bottleneck layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)
        # build decoder layers
        self.layers_up = nn.ModuleList()
        self.concat_back_dim = nn.ModuleList()
        for i_layer in range(self.num_layers):
            # Conv block fusing the concatenated skip feature (2C) back to C.
            # BUGFIX: the original also wrote ``self.up = ...`` here, transiently
            # re-registering the module each iteration; only the registration in
            # ``concat_back_dim`` is used, so the extra binding was removed.
            concat_cov = nn.Sequential(Rearrange('b (h w) c -> b c h w', h=patches_resolution[0] // (2 ** (self.num_layers - 1 - i_layer)), w=patches_resolution[1] // (2 ** (self.num_layers - 1 - i_layer))),
                                       nn.Conv2d(2 * int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),
                                                 int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),
                                                 kernel_size=3, stride=1, padding=1), nn.GELU(),
                                       nn.Conv2d(int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),
                                                 int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),
                                                 kernel_size=3, stride=1, padding=1), nn.GELU(),
                                       nn.Dropout(p=0.2),
                                       Rearrange('b c h w -> b (h w) c', h=patches_resolution[0] // (
                                               2 ** (self.num_layers - 1 - i_layer)),
                                                 w=patches_resolution[1] // (
                                                         2 ** (self.num_layers - 1 - i_layer))))
            if i_layer == 0:
                # first decoder stage only upsamples the bottleneck feature
                layer_up = PatchExpand(
                    input_resolution=(patches_resolution[0] // (2 ** (self.num_layers - 1 - i_layer)),
                                      patches_resolution[1] // (2 ** (self.num_layers - 1 - i_layer))),
                    dim=int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)), dim_scale=2, norm_layer=norm_layer)
            else:
                layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),
                                         input_resolution=(
                                             patches_resolution[0] // (2 ** (self.num_layers - 1 - i_layer)),
                                             patches_resolution[1] // (2 ** (self.num_layers - 1 - i_layer))),
                                         depth=depths[(self.num_layers - 1 - i_layer)],
                                         num_heads=num_heads[(self.num_layers - 1 - i_layer)],
                                         window_size=window_size,
                                         mlp_ratio=self.mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop_rate, attn_drop=attn_drop_rate,
                                         drop_path=dpr[sum(depths[:(self.num_layers - 1 - i_layer)]):sum(
                                             depths[:(self.num_layers - 1 - i_layer) + 1])],
                                         norm_layer=norm_layer,
                                         upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,
                                         use_checkpoint=use_checkpoint)
            self.layers_up.append(layer_up)
            self.concat_back_dim.append(concat_cov)
        self.norm = norm_layer(self.num_features)
        self.norm_up = norm_layer(self.embed_dim)
        if self.final_upsample == "expand_first":
            print("---final upsample expand_first---")
            self.up = FinalPatchExpand_X4(input_resolution=(img_size // patch_size, img_size // patch_size),
                                          dim_scale=4, dim=embed_dim)
            # 1x1 conv segmentation head
            self.output = nn.Conv2d(in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # truncated-normal init for conv/linear weights, standard LayerNorm init
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    # Encoder and Bottleneck
    def forward_features(self, x):
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        x_downsample = []
        for layer in self.layers:
            # the skip feature is recorded *before* each encoder stage
            x_downsample.append(x)
            x = layer(x)
        x = self.norm(x)  # B L C
        return x, x_downsample

    # Decoder and skip connections
    def forward_up_features(self, x, x_downsample):
        for inx, layer_up in enumerate(self.layers_up):
            if inx == 0:
                x = layer_up(x)
            else:
                # concat the matching encoder feature, fuse channels, then upsample
                # (generalized from the hard-coded index ``3 - inx``)
                x = torch.cat([x, x_downsample[self.num_layers - 1 - inx]], -1)
                x = self.concat_back_dim[inx](x)
                x = layer_up(x)
        x = self.norm_up(x)  # B L C
        return x

    def up_x4(self, x):
        """Final 4x expansion and segmentation head: tokens -> (B, num_classes, 4H, 4W)."""
        H, W = self.patches_resolution
        B, L, C = x.shape
        assert L == H * W, "input features has wrong size"
        if self.final_upsample == "expand_first":
            x = self.up(x)
            x = x.view(B, 4 * H, 4 * W, -1)
            x = x.permute(0, 3, 1, 2)  # B,C,H,W
            x = self.output(x)
        return x

    def forward(self, x):
        x, x_downsample = self.forward_features(x)
        x = self.forward_up_features(x, x_downsample)
        x = self.up_x4(x)
        return x
| 32,474 | 43.123641 | 217 | py |
CS-Unet | CS-Unet-main/datasets/dataset_synapse.py | import os
import random
import h5py
import numpy as np
import torch
from scipy import ndimage
from scipy.ndimage.interpolation import zoom
from torch.utils.data import Dataset
import json
import torchvision.transforms as T
from .aug import RandomAffine, GaussianBlur, To_PIL_Image,JointCompose, JointTo_Tensor
def normalize(img, mean, std):
    """Standardise a tensor: ``(img - mean) / std``, with mean/std cast to
    the image's dtype and device. Returns a new tensor (no in-place ops).
    """
    mean_t = torch.as_tensor(mean, dtype=img.dtype, device=img.device)
    std_t = torch.as_tensor(std, dtype=img.dtype, device=img.device)
    return img.sub(mean_t).div(std_t)
def random_rot_flip(image, label):
    """Apply the same random 90-degree rotation and axis flip to both arrays.

    Draws two values from the global numpy RNG (in this order): the number of
    quarter turns in [0, 4), then the flip axis in {0, 1}.
    """
    quarter_turns = np.random.randint(0, 4)
    image = np.rot90(image, quarter_turns)
    label = np.rot90(label, quarter_turns)
    flip_axis = np.random.randint(0, 2)
    image = np.flip(image, axis=flip_axis).copy()
    label = np.flip(label, axis=flip_axis).copy()
    return image, label
def random_rotate(image, label):
    """Rotate both arrays by the same random angle drawn from [-20, 20) degrees.

    Uses nearest-neighbour interpolation (order=0) so label values stay discrete,
    and keeps the original array shape (reshape=False).
    """
    angle = np.random.randint(-20, 20)

    def _rot(arr):
        return ndimage.rotate(arr, angle, order=0, reshape=False)

    return _rot(image), _rot(label)
class RandomGenerator(object):
    """Training-time augmentation: random rot/flip or free-angle rotation,
    then resize to ``output_size`` and convert to torch tensors.
    Returns {'image': float32 (1, H, W), 'label': int64 (H, W)}.
    """
    def __init__(self, output_size):
        # (H, W) target size of the returned image/label
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # 50% chance of a 90-degree rot + flip; otherwise a further 50% chance
        # of a free-angle rotation (the second draw only happens when the
        # first branch is not taken).
        if random.random() > 0.5:
            image, label = random_rot_flip(image, label)
        elif random.random() > 0.5:
            image, label = random_rotate(image, label)
        x, y = image.shape
        if x != self.output_size[0] or y != self.output_size[1]:
            # order=3: bicubic spline for the image; order=0 keeps labels discrete
            image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
            label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)  # add channel dim
        label = torch.from_numpy(label.astype(np.float32))
        sample = {'image': image, 'label': label.long()}
        return sample
class resize(object):
    """Deterministic (eval-time) transform: rescale the image (bicubic) and
    label (nearest-neighbour) to ``output_size`` and convert both to tensors.
    Returns {'image': float32 (1, H, W), 'label': int64 (H, W)}.
    (Dead commented-out normalization code from the original was removed.)
    """
    def __init__(self, output_size):
        # (H, W) target size
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        x, y = image.shape
        if x != self.output_size[0] or y != self.output_size[1]:
            # order=3: bicubic spline for the image; order=0 keeps labels discrete
            image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
            label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)  # add channel dim
        label = torch.from_numpy(label.astype(np.float32))
        sample = {'image': image, 'label': label.long()}
        return sample
class Synapse_dataset(Dataset):
    """Synapse multi-organ segmentation dataset.

    Train split: one ``<case>.npz`` per 2-D slice with keys 'image'/'label'.
    Other splits: one ``<case>.npy.h5`` volume with datasets 'image'/'label'.
    Case names are listed one per line in ``<list_dir>/<split>.txt``.
    """
    def __init__(self, base_dir, list_dir, split, transform=None):
        self.transform = transform  # using transform in torch!
        self.split = split
        # close the list file after reading (the original leaked the handle)
        with open(os.path.join(list_dir, self.split + '.txt')) as f:
            self.sample_list = f.readlines()
        self.data_dir = base_dir

    def __len__(self):
        return len(self.sample_list)

    def __getitem__(self, idx):
        if self.split == "train":
            slice_name = self.sample_list[idx].strip('\n')
            data_path = os.path.join(self.data_dir, slice_name + '.npz')
            data = np.load(data_path)
            image, label = data['image'], data['label']
        else:
            vol_name = self.sample_list[idx].strip('\n')
            filepath = self.data_dir + "/{}.npy.h5".format(vol_name)
            # open read-only and close after copying the arrays out
            # (the original used h5py's default mode and never closed the file)
            with h5py.File(filepath, 'r') as data:
                image, label = data['image'][:], data['label'][:]
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        sample['case_name'] = self.sample_list[idx].strip('\n')
        return sample
class AcdcDataset(Dataset):
    """ACDC dataset: one HDF5 file per sample (datasets 'image'/'label'),
    with file paths listed in ``<base_dir>/<split>.json``.
    (Dead commented-out code from the original was removed.)
    """
    # declared class attribute; assigned elsewhere — TODO confirm usage
    target_augment: object

    def __init__(self, base_dir, list_dir=None, split='train', transform=None):
        self.transform = transform
        self.split = split
        with open(os.path.join(base_dir, self.split + '.json'), 'r') as f:
            self.data_infos = json.load(f)

    def __len__(self):
        return len(self.data_infos)

    def __getitem__(self, index):
        # Read image and label through a single, properly closed handle
        # (the original opened the file twice and never closed either handle).
        with h5py.File(self.data_infos[index], 'r') as h5:
            img = np.array(h5['image'])[:, :]
            gt = np.array(h5['label'])[:, :]
        # case id: the part between "_set/P_" and ".hdf5" in the file path
        img_id = self.data_infos[index].split("_set/P_")[1].split(".hdf5")[0]
        sample = {'image': img, 'label': gt, 'case_name': img_id}
        if self.transform:
            sample = self.transform(sample)
        return sample
| 4,893 | 34.463768 | 106 | py |
CS-Unet | CS-Unet-main/datasets/dataset_ACDC.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import random
import numpy as np
import torch
from scipy import ndimage
from scipy.ndimage.interpolation import zoom
from torch.utils.data import Dataset
from .aug import RandomAffine, GaussianBlur, To_PIL_Image,JointCompose, JointTo_Tensor
import torchvision.transforms as T
def random_rot_flip(image, label):
    """Rotate both arrays by the same random multiple of 90 degrees, then flip
    them along the same random axis.

    Consumes two values from the global numpy RNG, in order: quarter turns in
    [0, 4), then the flip axis in {0, 1}.
    """
    turns = np.random.randint(0, 4)
    image, label = np.rot90(image, turns), np.rot90(label, turns)
    axis_choice = np.random.randint(0, 2)
    image = np.flip(image, axis=axis_choice).copy()
    label = np.flip(label, axis=axis_choice).copy()
    return image, label
def random_rotate(image, label):
    """Rotate image and label by one shared random angle in [-20, 20) degrees,
    using nearest-neighbour interpolation (order=0, labels stay discrete) and
    preserving the array shape (reshape=False).
    """
    angle = np.random.randint(-20, 20)
    rotated = [ndimage.rotate(arr, angle, order=0, reshape=False) for arr in (image, label)]
    return rotated[0], rotated[1]
class RandomGenerator(object):
    """Training-time augmentation for ACDC: random rot/flip or free-angle
    rotation, then resize to ``output_size`` and convert to torch tensors.
    Returns {'image': float32 (1, H, W), 'label': int64 (H, W)}.
    """
    def __init__(self, output_size):
        # (H, W) target size of the returned image/label
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # 50% chance of a 90-degree rot + flip; otherwise a further 50% chance
        # of a free-angle rotation (the second draw only happens when the
        # first branch is not taken).
        if random.random() > 0.5:
            image, label = random_rot_flip(image, label)
        elif random.random() > 0.5:
            image, label = random_rotate(image, label)
        x, y = image.shape
        if x != self.output_size[0] or y != self.output_size[1]:
            # order=3: bicubic spline for the image; order=0 keeps labels discrete
            image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3)
            label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)  # add channel dim
        label = torch.from_numpy(label.astype(np.float32))
        sample = {'image': image, 'label': label.long()}
        return sample
class ACDCdataset(Dataset):
    """ACDC dataset backed by ``.npz`` files listed in ``<list_dir>/<split>.txt``.

    Train/valid samples live under ``<base_dir>/<split>/``; test volumes live
    directly under ``<base_dir>``. Each ``.npz`` holds ``img`` and ``label``.
    """

    def __init__(self, base_dir, list_dir, split, transform=None):
        self.transform = transform  # using transform in torch!
        self.split = split
        # Read the split listing with a context manager so the file handle is
        # closed (the original leaked it via open(...).readlines()).
        with open(os.path.join(list_dir, self.split + '.txt')) as f:
            self.sample_list = f.readlines()
        self.data_dir = base_dir

    def __len__(self):
        return len(self.sample_list)

    def __getitem__(self, idx):
        name = self.sample_list[idx].strip('\n')
        if self.split in ("train", "valid"):
            data_path = os.path.join(self.data_dir, self.split, name)
        else:
            data_path = os.path.join(self.data_dir, name)
        data = np.load(data_path)
        sample = {'image': data['img'], 'label': data['label']}
        # Augmentation is applied to the training split only.
        if self.transform and self.split == "train":
            sample = self.transform(sample)
        sample['case_name'] = name
        return sample
CS-Unet | CS-Unet-main/datasets/aug.py | import numpy as np
import random
import numbers
from torchvision.transforms import functional as F
from PIL import Image, ImageFilter
import torch
# Human-readable names for PIL resampling filters; used when building the
# repr of transforms that carry a `resample` argument (see RandomAffine).
_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
    Image.LANCZOS: 'PIL.Image.LANCZOS',
    Image.HAMMING: 'PIL.Image.HAMMING',
    Image.BOX: 'PIL.Image.BOX',
}
class JointCompose():
    """Compose transforms that act on an (image, mask) pair jointly."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, mask):
        assert img.size == mask.size
        out_img, out_mask = img, mask
        for transform in self.transforms:
            out_img, out_mask = transform(out_img, out_mask)
        return out_img, out_mask
class JointTo_Tensor():
    """Convert an (array, array) pair to CHW torch tensors, inserting a
    channel axis for 2-D inputs."""

    def __call__(self, arr, arr2):
        def _to_chw_tensor(a):
            a = np.array(a)
            if a.ndim == 2:
                a = a[:, :, None]
            return torch.from_numpy(a.transpose(2, 0, 1))

        return _to_chw_tensor(arr), _to_chw_tensor(arr2)
class To_PIL_Image():
    """Convert both members of an (image, mask) pair to PIL images via the
    module-level ``to_pil_image`` helper."""

    def __call__(self, img, mask):
        return to_pil_image(img), to_pil_image(mask)
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""

    def __init__(self, sigma=[.1, 2.]):
        self.sigma = sigma

    def __call__(self, x):
        # Draw a blur radius uniformly from [sigma_min, sigma_max].
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
class RandomAffine(object):
    """Random affine transformation applied jointly to an image and its mask,
    keeping the image center invariant.

    Args:
        degrees (sequence or float or int): rotation range. A single number d
            means (-d, +d); 0 disables rotation.
        translate (tuple, optional): maximum absolute fraction (a, b) for
            horizontal/vertical shifts; dx sampled in (-img_width * a,
            img_width * a), dy likewise with b.
        scale (tuple, optional): scaling factor interval (min, max).
        shear (sequence or float or int, optional): shear range; a single
            number s means (-s, +s).
        resample (PIL interpolation, optional): resampling filter.
        fillcolor (int): fill color outside the transform (Pillow>=5.0.0).
        prob (float): probability of applying the transform.
    """

    def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0, prob=0.5):
        self.prob = prob
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                "degrees should be a list or tuple and it must be of length 2."
            self.degrees = degrees

        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                "translate should be a list or tuple and it must be of length 2."
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
        self.translate = translate

        if scale is not None:
            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
                "scale should be a list or tuple and it must be of length 2."
            for s in scale:
                if s <= 0:
                    raise ValueError("scale values should be positive")
        self.scale = scale

        if shear is not None:
            if isinstance(shear, numbers.Number):
                if shear < 0:
                    raise ValueError("If shear is a single number, it must be positive.")
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
                    "shear should be a list or tuple and it must be of length 2."
                self.shear = shear
        else:
            self.shear = shear

        self.resample = resample
        self.fillcolor = fillcolor

    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for affine transformation.

        Returns:
            sequence: (angle, (dx, dy), scale, shear) to pass to the affine
            transformation.
        """
        angle = random.uniform(degrees[0], degrees[1])
        if translate is not None:
            max_dx = translate[0] * img_size[0]
            max_dy = translate[1] * img_size[1]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = random.uniform(scale_ranges[0], scale_ranges[1])
        else:
            scale = 1.0

        if shears is not None:
            shear = random.uniform(shears[0], shears[1])
        else:
            shear = 0.0

        return angle, translations, scale, shear

    def __call__(self, img, mask):
        """Apply the same sampled affine transform to img and mask.

        Args:
            img (PIL Image): Image to be transformed.
            mask (PIL Image): Mask to be transformed identically.

        Returns:
            tuple: (transformed image, transformed mask).
        """
        if random.random() < self.prob:
            ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
            return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor), F.affine(mask, *ret, resample=self.resample, fillcolor=self.fillcolor)
        return img, mask

    def __repr__(self):
        s = '{name}(degrees={degrees}'
        if self.translate is not None:
            s += ', translate={translate}'
        if self.scale is not None:
            s += ', scale={scale}'
        if self.shear is not None:
            s += ', shear={shear}'
        if self.resample > 0:
            s += ', resample={resample}'
        if self.fillcolor != 0:
            s += ', fillcolor={fillcolor}'
        s += ')'
        d = dict(self.__dict__)
        # Bug fix: only translate the resample flag when it is a real PIL
        # filter. The default (False) is not a key of
        # _pil_interpolation_to_str and previously raised KeyError here.
        if self.resample > 0:
            d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)
def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPIlImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes

    Returns:
        PIL Image: Image converted to PIL Image.

    Raises:
        TypeError: if ``pic`` is neither a tensor nor an ndarray, or its
            dtype is unsupported.
        ValueError: if ``mode`` is incompatible with the input's channels
            or dtype.
    """
    # if not(_is_numpy_image(pic) or _is_tensor_image(pic)):
    #     raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
    npimg = pic
    if isinstance(pic, torch.FloatTensor):
        # Float tensors are assumed to hold values in [0, 1].
        pic = pic.mul(255).byte()
    if torch.is_tensor(pic):
        # CHW tensor -> HWC ndarray, as expected by PIL.
        npimg = np.transpose(pic.numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            # Bug fix: the message previously interpolated `np.dtype` (the
            # class itself) instead of the array's actual dtype.
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
| 9,525 | 39.194093 | 160 | py |
CS-Unet | CS-Unet-main/utils/test_ACDC.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.utils import test_single_volume
def inference(args, model, testloader, test_save_path=None):
    """Run volume-wise evaluation and return (mean dice, mean HD95)."""
    logging.info("{} test iterations per epoch".format(len(testloader)))
    model.eval()
    metric_sum = 0.0
    with torch.no_grad():
        for i_batch, sampled_batch in tqdm(enumerate(testloader)):
            h, w = sampled_batch["image"].size()[2:]
            image = sampled_batch["image"]
            label = sampled_batch["label"]
            case_name = sampled_batch['case_name'][0]
            metric_i = test_single_volume(
                image, label, model, classes=args.num_classes,
                patch_size=[args.img_size, args.img_size],
                test_save_path=test_save_path, case=case_name,
                z_spacing=args.z_spacing)
            metric_sum += np.array(metric_i)
            case_mean = np.mean(metric_i, axis=0)
            logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, case_mean[0], case_mean[1]))
    metric_sum = metric_sum / len(testloader)
    for cls in range(1, args.num_classes):
        logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (cls, metric_sum[cls - 1][0], metric_sum[cls - 1][1]))
    performance = np.mean(metric_sum, axis=0)[0]
    mean_hd95 = np.mean(metric_sum, axis=0)[1]
    logging.info('Testing performance: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))
    # logging.info("Testing Finished!")
    return performance, mean_hd95
| 1,585 | 45.647059 | 151 | py |
CS-Unet | CS-Unet-main/utils/utils.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import torch
from medpy import metric
import torch.nn as nn
from PIL import Image
from torchvision import transforms
import SimpleITK as sitk
from scipy.ndimage import zoom
class Normalize():
    """Normalize the image half of an (image, target) pair with mean/std 0.5."""

    def __call__(self, sample):
        normalize = transforms.Normalize((.5, .5, .5), (0.5, 0.5, 0.5))
        image, target = sample[0], sample[1]
        return normalize(image), target
class ToTensor():
    """Convert both members of an (image, target) pair to tensors."""

    def __call__(self, sample):
        """
        Args:
            sample: pair of PIL Images or ndarrays to be converted.

        Returns:
            tuple: both members converted via torchvision's ToTensor.
        """
        to_tensor = transforms.ToTensor()
        return to_tensor(sample[0]), to_tensor(sample[1])
class RandomRotation():
    """Rotate image and label by the same random angle in [0, 360) degrees."""

    def __init__(self):
        pass

    def __call__(self, sample):
        image, target = sample
        angle = np.random.randint(0, 360)
        return image.rotate(angle, Image.NEAREST), target.rotate(angle, Image.NEAREST)
class RandomFlip():
    """With (roughly) equal probability: flip left/right, flip top/bottom,
    rotate 90 degrees, or leave the pair unchanged."""

    def __init__(self):
        pass

    def __call__(self, sample):
        image, target = sample
        draw = np.random.random()
        if 0 < draw < 0.25:
            op = Image.FLIP_LEFT_RIGHT
        elif 0.25 <= draw < 0.5:
            op = Image.FLIP_TOP_BOTTOM
        elif 0.5 <= draw < 0.75:
            op = Image.ROTATE_90
        else:
            return image, target
        return image.transpose(op), target.transpose(op)
class DiceLoss(nn.Module):
    """Multi-class soft Dice loss, averaged over classes.

    Targets are integer class maps; they are one-hot encoded internally.
    """

    def __init__(self, n_classes):
        super(DiceLoss, self).__init__()
        self.n_classes = n_classes

    def _one_hot_encoder(self, input_tensor):
        # Stack per-class binary masks along the channel dimension.
        channels = [(input_tensor == cls).unsqueeze(1) for cls in range(self.n_classes)]
        return torch.cat(channels, dim=1).float()

    def _dice_loss(self, score, target):
        target = target.float()
        smooth = 1e-5
        overlap = torch.sum(score * target)
        denom = torch.sum(score * score) + torch.sum(target * target)
        return 1 - (2 * overlap + smooth) / (denom + smooth)

    def forward(self, inputs, target, weight=None, softmax=False):
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())
        total = 0.0
        for cls in range(self.n_classes):
            total = total + self._dice_loss(inputs[:, cls], target[:, cls]) * weight[cls]
        return total / self.n_classes
'''
def calculate_metric_percase(output, target):
smooth = 1e-5
if torch.is_tensor(output):
output = torch.sigmoid(output).data.cpu().numpy()
if torch.is_tensor(target):
target = target.data.cpu().numpy()
if output.sum() > 0 and target.sum() > 0:
hd = metric.binary.hd(output, target)
else:
hd = 0
intersection = (output * target).sum()
return (2. * intersection + smooth) / \
(output.sum() + target.sum() + smooth), hd
'''
def calculate_metric_percase(pred, gt):
    """Binarize pred/gt in place and return (dice, hd95) for the foreground."""
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    has_pred = pred.sum() > 0
    has_gt = gt.sum() > 0
    if has_pred and has_gt:
        return metric.binary.dc(pred, gt), metric.binary.hd95(pred, gt)
    if has_pred and not has_gt:
        # Prediction without any ground-truth foreground scores (1, 0).
        return 1, 0
    return 0, 0
def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):
    """Evaluate one volume (or a single 2-D image) slice by slice.

    Each slice is resized to ``patch_size`` for the forward pass and the
    prediction is resized back. Returns a list of (dice, hd95) pairs for
    classes 1..classes-1. When ``test_save_path`` is given, the image,
    prediction and label are additionally written out as NIfTI files.

    NOTE(review): requires a CUDA device (``.cuda()``); assumes a 3-D
    ``image`` is laid out as (slices, H, W) — confirm against the caller.
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    if len(image.shape) == 3:
        # Volume: predict each slice independently and stack the results.
        prediction = np.zeros_like(label)
        for ind in range(image.shape[0]):
            slice = image[ind, :, :]
            #imshow(slice, "./out/" + case[:-4] + "_img_" + str(ind) + ".jpg", denormalize=False)
            #imshow(label_slice, "./out/" + case[:-4] + "_label_" + str(ind) + ".jpg", denormalize=False)
            x, y = slice.shape[0], slice.shape[1]
            if x != patch_size[0] or y != patch_size[1]:
                # Cubic interpolation up/down to the network input size.
                slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3)  # previous using 0
            input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()
            net.eval()
            with torch.no_grad():
                outputs = net(input)
                out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
                out = out.cpu().detach().numpy()
                #imshow(out, "./out_1/" + case[:-4] + "_pre_" + str(ind) + ".jpg", denormalize=False)
                if x != patch_size[0] or y != patch_size[1]:
                    # Nearest-neighbour resize back so labels stay integral.
                    pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
                else:
                    pred = out
                prediction[ind] = pred
    else:
        # Single 2-D image: one forward pass, no resizing.
        input = torch.from_numpy(image).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()
    # Per-class metrics over the whole volume (class 0 = background skipped).
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))

    if test_save_path is not None:
        img_itk = sitk.GetImageFromArray(image.astype(np.float32))
        prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
        lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
        img_itk.SetSpacing((1, 1, z_spacing))
        prd_itk.SetSpacing((1, 1, z_spacing))
        lab_itk.SetSpacing((1, 1, z_spacing))
        sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz")
        sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz")
        sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz")
    return metric_list
CS-Unet | CS-Unet-main/utils/test_Synapse.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.utils import test_single_volume
def inference(args, model, testloader, test_save_path=None):
    """Evaluate `model` volume by volume and return (mean_dice, mean_hd95).

    Expects `args` to provide `num_classes`, `img_size` and `z_spacing`;
    per-case and per-class metrics are emitted on the logging stream.
    """
    logging.info("{} test iterations per epoch".format(len(testloader)))
    model.eval()
    metric_list = 0.0
    with torch.no_grad():
        for i_batch, sampled_batch in tqdm(enumerate(testloader)):
            h, w = sampled_batch["image"].size()[2:]
            image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0]
            # (num_classes-1) x 2 array of (dice, hd95) for this case.
            metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size],
                                          test_save_path=test_save_path, case=case_name, z_spacing=args.z_spacing)
            metric_list += np.array(metric_i)
            logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1]))
    # Average the accumulated per-class metrics over all cases.
    metric_list = metric_list / len(testloader)
    for i in range(1, args.num_classes):
        logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i-1][0], metric_list[i-1][1]))
    performance = np.mean(metric_list, axis=0)[0]
    mean_hd95 = np.mean(metric_list, axis=0)[1]
    logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))
    logging.info("Testing Finished!")
    return performance, mean_hd95
DeepPersonality | DeepPersonality-main/dpcv/__init__.py | import torch
import sys
import os
# Make the package directory importable as a top-level path so sibling
# modules resolve without installing the package.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)
# optionally print the sys.path for debugging
# print("in _ _init_ _.py sys.path:\n ",sys.path)
# Package-wide default device: CUDA when available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
| 290 | 28.1 | 69 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/utils.py | # Copyright (C) 2020-2021, François-Guillaume Fernandez.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import torch
from torch import Tensor
from torch import nn
from typing import List, Optional, Tuple
from functools import partial
__all__ = ['locate_candidate_layer', 'locate_linear_layer']
def locate_candidate_layer(mod: nn.Module, input_shape: Tuple[int, ...] = (3, 224, 224)) -> Optional[str]:
    """Attempts to find a candidate layer to use for CAM extraction

    Args:
        mod: the module to inspect
        input_shape: the expected shape of input tensor excluding the batch dimension

    Returns:
        str: the candidate layer for CAM
    """
    # Remember the training flag and switch to eval for the dry run.
    module_mode = mod.training
    mod.eval()

    output_shapes: List[Tuple[Optional[str], Tuple[int, ...]]] = []

    def _record_output_shape(module: nn.Module, input: Tensor, output: Tensor, name: Optional[str] = None) -> None:
        """Activation hook"""
        output_shapes.append((name, output.shape))

    hook_handles: List[torch.utils.hooks.RemovableHandle] = []
    # forward hook on all layers
    for n, m in mod.named_modules():
        hook_handles.append(m.register_forward_hook(partial(_record_output_shape, name=n)))

    # forward a dummy input on the model's own device
    with torch.no_grad():
        _ = mod(torch.rand(1, *input_shape, device=next(mod.parameters()).data.device))

    # Remove all temporary hooks
    for handle in hook_handles:
        handle.remove()

    # Bug fix: restore the previous mode recursively. The original
    # `mod.training = module_mode` only touched the root module's flag and
    # left every submodule in eval mode.
    mod.train(module_mode)

    # Check output shapes: keep the last layer whose output is still spatial,
    # stopping before flattening or global pooling.
    candidate_layer = None
    for layer_name, output_shape in output_shapes:
        if len(output_shape) != (len(input_shape) + 1) or all(v == 1 for v in output_shape[2:]):
            break
        else:
            candidate_layer = layer_name

    return candidate_layer
def locate_linear_layer(mod: nn.Module) -> Optional[str]:
    """Return the name of the first ``nn.Linear`` layer in ``mod``.

    Args:
        mod: the module to inspect

    Returns:
        str: the first fully-connected layer's name, or ``None`` if absent.
    """
    return next(
        (name for name, layer in mod.named_modules() if isinstance(layer, nn.Linear)),
        None,
    )
| 2,449 | 29.246914 | 115 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/excitation_bp.py | import weakref
import torch
import math
import torch.nn.functional as F
# EPSILON_DOUBLE = torch.tensor(2.220446049250313e-16, dtype=torch.float64)
# Machine epsilon for float32 and sqrt(2), kept as tensors so they combine
# with tensor arithmetic in imsmooth below.
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
# NOTE(review): despite the "DOUBLE" name this is created as float32.
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float32)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
def imsmooth(tensor,
             sigma,
             stride=1,
             padding=0,
             padding_mode='constant',
             padding_value=0):
    r"""Apply a 2D Gaussian filter to a tensor.

    The 2D convolution is separated into a vertical and a horizontal 1D
    convolution with a normalized Gaussian kernel of half-width
    :math:`W = \lceil 4\sigma \rceil`. Padding and stride are forwarded to
    :func:`torch.nn.functional.conv2d`.

    Args:
        tensor (:class:`torch.Tensor`): :math:`N\times C\times H\times W`
            image tensor.
        sigma (float): standard deviation of the Gaussian kernel.
        stride (int, optional): subsampling factor. Default: ``1``.
        padding (int, optional): extra padding. Default: ``0``.
        padding_mode (str, optional): ``'constant'``, ``'reflect'`` or
            ``'replicate'``. Default: ``'constant'``.
        padding_value (float, optional): constant value for the `constant`
            padding mode. Default: ``0``.

    Returns:
        :class:`torch.Tensor`: :math:`N\times C\times H\times W` tensor with
        the smoothed images.
    """
    assert sigma >= 0
    half_width = math.ceil(4 * sigma)
    # float32 sqrt(2) and machine epsilon (guards the sigma=0 division).
    sqrt_two = torch.tensor(math.sqrt(2), dtype=torch.float32)
    eps = torch.tensor(1.19209290E-07, dtype=torch.float32)
    taps = torch.arange(-half_width,
                        half_width + 1,
                        dtype=torch.float32,
                        device=tensor.device)
    taps = taps / (sqrt_two * sigma + eps)
    kernel = torch.exp(-taps * taps)
    kernel = kernel / torch.sum(kernel)

    num_channels = tensor.shape[1]
    total_pad = half_width + padding
    if padding_mode == 'constant' and padding_value == 0:
        # Zero constant padding can be done by conv2d itself.
        other_padding = total_pad
        x = tensor
    else:
        # Otherwise pad explicitly and disable conv padding.
        x = F.pad(tensor,
                  (total_pad, total_pad, total_pad, total_pad),
                  mode=padding_mode,
                  value=padding_value)
        other_padding = 0
        padding = 0

    # Vertical pass, then horizontal pass, one group per channel.
    x = F.conv2d(x,
                 kernel.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
                 padding=(other_padding, padding),
                 stride=(stride, 1),
                 groups=num_channels)
    x = F.conv2d(x,
                 kernel.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
                 padding=(padding, other_padding),
                 stride=(1, stride),
                 groups=num_channels)
    return x
class Patch(object):
    """Patch a callable in a module, with the ability to undo the patch."""

    @staticmethod
    def resolve(target):
        """Resolve a target string into a (module, attribute) pair.

        ``'this.that.thing'`` resolves to the imported module ``this.that``
        and the attribute name ``'thing'``.

        Args:
            target (str): target string.

        Returns:
            tuple: module, attribute.
        """
        prefix, attribute = target.rsplit('.', 1)
        parts = prefix.split('.')
        import_path = parts[0]
        module = __import__(import_path)
        for part in parts[1:]:
            import_path = '{}.{}'.format(import_path, part)
            __import__(import_path)
            module = getattr(module, part)
        return module, attribute

    def __init__(self, target, new_callable):
        """Install the patch.

        Args:
            target (str): path to the callable to patch.
            new_callable (fun): replacement callable.
        """
        module, attribute = Patch.resolve(target)
        self.target = module
        self.attribute = attribute
        # Keep the original so remove() can restore it.
        self.orig_callable = getattr(module, attribute)
        setattr(module, attribute, new_callable)

    def __del__(self):
        self.remove()

    def remove(self):
        """Remove the patch, restoring the original callable (idempotent)."""
        if self.target is not None:
            setattr(self.target, self.attribute, self.orig_callable)
        self.target = None
# class ReLUContext(object):
# """
# A context manager that replaces :func:`torch.relu` with
# :attr:`relu_function`.
#
# Args:
# relu_func (:class:`torch.autograd.function.FunctionMeta`): class
# definition of a :class:`torch.autograd.Function`.
# """
#
# def __init__(self, relu_func):
# assert isinstance(relu_func, torch.autograd.function.FunctionMeta)
# self.relu_func = relu_func
# self.patches = []
#
# def __enter__(self):
# relu = self.relu_func().apply
# self.patches = [
# Patch('torch.relu', relu),
# Patch('torch.relu_', relu),
# ]
# return self
#
# def __exit__(self, type, value, traceback):
# for p in self.patches:
# p.remove()
# return False # re-raise any exception
#
def _wrap_in_list(x):
if isinstance(x, list):
return x
elif isinstance(x, tuple):
return list(x)
else:
return [x]
class _InjectContrast(object):
def __init__(self, contrast, non_negative):
self.contrast = contrast
self.non_negative = non_negative
def __call__(self, grad):
assert grad.shape == self.contrast.shape
delta = grad - self.contrast
if self.non_negative:
delta = delta.clamp(min=0)
return delta
class _Catch(object):
def __init__(self, probe):
self.probe = weakref.ref(probe)
def _process_data(self, data):
if not self.probe():
return
p = self.probe()
assert isinstance(data, list)
p.data = data
for i, x in enumerate(p.data):
x.requires_grad_(True)
x.retain_grad()
if len(p.contrast) > i and p.contrast[i] is not None:
injector = _InjectContrast(
p.contrast[i], p.non_negative_contrast)
x.register_hook(injector)
class _CatchInputs(_Catch):
    """Forward-pre-hook: records the inputs flowing into a module."""
    def __call__(self, module, input):
        self._process_data(_wrap_in_list(input))
class _CatchOutputs(_Catch):
    """Forward hook: records the outputs produced by a module."""
    def __call__(self, module, input, output):
        self._process_data(_wrap_in_list(output))
class Probe(object):
    """Probe for a layer.

    A probe attaches to a given :class:`torch.nn.Module` instance.
    While attached, the object records any data produced by the module along
    with the corresponding gradients. Use :func:`remove` to remove the probe.

    Examples:

        .. code:: python

            module = torch.nn.ReLU
            probe = Probe(module)
            x = torch.randn(1, 10)
            y = module(x)
            z = y.sum()
            z.backward()
            print(probe.data[0].shape)
            print(probe.data[0].grad.shape)
    """

    def __init__(self, module, target='input'):
        """Create a probe attached to the specified module.

        The probe intercepts calls to the module on the way forward, capturing
        by default all the input activation tensors with their gradients.
        The activation tensors are stored as a sequence :attr:`data`.

        Args:
            module (torch.nn.Module): Module to attach.
            target (str): Choose from ``'input'`` or ``'output'``. Use
                ``'output'`` to intercept the outputs of a module
                instead of the inputs into the module. Default: ``'input'``.

        .. Warning:

            PyTorch module interface (at least until 1.1.0) is partially
            broken. In particular, the hook functionality used by the probe
            works properly only for atomic modules, not for containers such as
            sequences or for complex modules that run several functions
            internally.
        """
        self.module = module
        self.data = []          # captured tensors, filled by the hook
        self.target = target
        self.hook = None
        self.contrast = []      # optional contrast gradients (see _Catch)
        self.non_negative_contrast = False
        # In-place modules would clobber the captured tensors; temporarily
        # force out-of-place operation and remember the original setting.
        if hasattr(self.module, "inplace"):
            self.inplace = self.module.inplace
            self.module.inplace = False
        if self.target == 'input':
            self.hook = module.register_forward_pre_hook(_CatchInputs(self))
        elif self.target == 'output':
            self.hook = module.register_forward_hook(_CatchOutputs(self))
        else:
            assert False

    def __del__(self):
        self.remove()

    def remove(self):
        """Remove the probe, restoring the module's inplace setting."""
        if self.module is not None:
            if hasattr(self.module, "inplace"):
                self.module.inplace = self.inplace
            self.hook.remove()
            self.module = None
class NullContext(object):
    r"""Context manager that does nothing on entry or exit."""

    def __init__(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Never suppress exceptions.
        return False
def get_pointing_gradient(pred_y, y, normalize=True):
    """Returns a gradient tensor for the pointing game.

    The result is zero everywhere except at class ``y``, where it holds
    ``exp(pred_y[0, y])`` (rescaled to sum to one when ``normalize``).

    Args:
        pred_y (:class:`torch.Tensor`): 2D or 4D model output, batch size 1.
        y (int): target label.
        normalize (bool): If True, normalize the gradient tensor s.t. it
            sums to 1. Default: ``True``.

    Returns:
        :class:`torch.Tensor`: gradient tensor with the same shape as
        :attr:`pred_y`.
    """
    assert isinstance(pred_y, torch.Tensor)
    assert pred_y.dim() in (2, 4)
    assert pred_y.shape[0] == 1
    assert isinstance(y, int)
    grad = torch.zeros_like(pred_y)
    grad[0, y] = torch.exp(pred_y[0, y])
    if normalize:
        grad[0, y] /= grad[0, y].sum()
    return grad
def get_backward_gradient(pred_y, y):
    r"""Return a gradient tensor for backpropagation from :attr:`pred_y`.

    If :attr:`y` is a tensor with the same shape as :attr:`pred_y` it is
    returned unchanged. Otherwise :attr:`y` is treated as one class index
    per batch element; the result is a one-hot indicator along the channel
    dimension, broadcast over all spatial dimensions of :attr:`pred_y`.

    Args:
        pred_y (:class:`torch.Tensor`): model output tensor.
        y (int, :class:`torch.Tensor`, list, or :class:`np.ndarray`): target
            label(s) that can be cast to :class:`torch.long`.

    Returns:
        :class:`torch.Tensor`: gradient tensor with the same shape as
        :attr:`pred_y`.
    """
    assert isinstance(pred_y, torch.Tensor)
    if not isinstance(y, torch.Tensor):
        y = torch.tensor(y, dtype=torch.long, device=pred_y.device)
    assert isinstance(y, torch.Tensor)
    if y.shape == pred_y.shape:
        return y
    assert y.dtype == torch.long

    num_spatial = pred_y.dim() - 2
    one_hot = torch.zeros_like(pred_y)
    index = y.reshape(-1, 1, *((1,) * num_spatial)).expand_as(one_hot)
    one_hot.scatter_(1, index, 1.)
    return one_hot
def get_module(model, module):
    r"""Return a layer of :attr:`model` identified by name or by itself.

    If :attr:`module` is already a :class:`torch.nn.Module` it is returned
    unchanged. If it is a name (as produced by :func:`named_modules`), the
    matching submodule is returned, or ``None`` when no such name exists.
    The empty string names the model itself.

    Args:
        model (:class:`torch.nn.Module`): model in which to search for layer.
        module (str or :class:`torch.nn.Module`): name of layer (str) or the
            layer itself (:class:`torch.nn.Module`).

    Returns:
        :class:`torch.nn.Module`: the layer, or ``None`` if not found.
    """
    if isinstance(module, torch.nn.Module):
        return module
    assert isinstance(module, str)
    if module == '':
        return model
    return next(
        (child for name, child in model.named_modules() if name == module),
        None,
    )
def gradient_to_saliency(x):
    r"""Collapse the gradient of *x* into a saliency map.

    Per spatial location, takes the maximum absolute gradient value across
    channels:

    .. math::
        s_{n,1,u} = \max_{0 \leq c < C} |dx_{ncu}|

    Args:
        x (Tensor): activation with a populated ``x.grad``.

    Return:
        Tensor: saliency map with a singleton channel dimension.
    """
    abs_grad = x.grad.abs()
    saliency_map, _ = abs_grad.max(dim=1, keepdim=True)
    return saliency_map
def resize_saliency(tensor, saliency, size, mode):
    """Resize a saliency map.

    Args:
        tensor (:class:`torch.Tensor`): reference tensor.
        saliency (:class:`torch.Tensor`): saliency map.
        size (bool or tuple of int): if a tuple (i.e., (width, height)),
            resize :attr:`saliency` to :attr:`size`. If True, resize
            :attr:`saliency` to the spatial shape of :attr:`tensor`;
            if False, return :attr:`saliency` unchanged.
        mode (str): mode for :func:`torch.nn.functional.interpolate`.

    Returns:
        :class:`torch.Tensor`: resized saliency map.
    """
    if size is False:
        return saliency
    if size is True:
        target_size = tensor.shape[2:]
    elif isinstance(size, (tuple, list)):
        # Caller passes (width, height); interpolate wants (height, width).
        target_size = size[::-1]
    else:
        assert False, "resize must be True, False or a tuple."
    return F.interpolate(saliency, target_size, mode=mode, align_corners=False)
def saliency(model,
             input,
             target,
             saliency_layer='',
             resize=False,
             resize_mode='bilinear',
             smooth=0,
             context_builder=NullContext,
             gradient_to_saliency=gradient_to_saliency,
             get_backward_gradient=get_backward_gradient,
             debug=False):
    """Apply a backprop-based attribution method to an image.
    The saliency method is specified by a suitable context factory
    :attr:`context_builder`. This context is used to modify the backpropagation
    algorithm to match a given visualization method. This:
    1. Attaches a probe to the output tensor of :attr:`saliency_layer`,
       which must be a layer in :attr:`model`. If no such layer is specified,
       it selects the input tensor to :attr:`model`.
    2. Uses the function :attr:`get_backward_gradient` to obtain a gradient
       for the output tensor of the model. This function is passed
       as input the output tensor as well as the parameter :attr:`target`.
       By default, the :func:`get_backward_gradient` function is used.
       The latter generates as gradient a one-hot vector selecting
       :attr:`target`, usually the index of the class predicted by
       :attr:`model`.
    3. Evaluates :attr:`model` on :attr:`input` and then computes the
       pseudo-gradient of the model with respect the selected tensor. This
       calculation is controlled by :attr:`context_builder`.
    4. Extract the pseudo-gradient at the selected tensor as a raw saliency
       map.
    5. Call :attr:`gradient_to_saliency` to obtain an actual saliency map.
       This defaults to :func:`gradient_to_saliency` that takes the maximum
       absolute value along the channel dimension of the pseudo-gradient
       tensor.
    6. Optionally resizes the saliency map thus obtained. By default,
       this uses bilinear interpolation and resizes the saliency to the same
       spatial dimension of :attr:`input`.
    7. Optionally applies a Gaussian filter to the resized saliency map.
       The standard deviation :attr:`sigma` of this filter is measured
       as a fraction of the maxmum spatial dimension of the resized
       saliency map.
    8. Removes the probe.
    9. Returns the saliency map or optionally a tuple with the saliency map
       and a OrderedDict of Probe objects for all modules in the model, which
       can be used for debugging.
    Args:
        model (:class:`torch.nn.Module`): a model.
        input (:class:`torch.Tensor`): input tensor.
        target (int or :class:`torch.Tensor`): target label(s).
        saliency_layer (str or :class:`torch.nn.Module`, optional): name of the
            saliency layer (str) or the layer itself (:class:`torch.nn.Module`)
            in the model at which to visualize. Default: ``''`` (visualize
            at input).
        resize (bool or tuple, optional): if True, upsample saliency map to the
            same size as :attr:`input`. It is also possible to specify a pair
            (width, height) for a different size. Default: ``False``.
        resize_mode (str, optional): upsampling method to use. Default:
            ``'bilinear'``.
        smooth (float, optional): amount of Gaussian smoothing to apply to the
            saliency map. Default: ``0``.
        context_builder (type, optional): type of context to use. Default:
            :class:`NullContext`.
        gradient_to_saliency (function, optional): function that converts the
            pseudo-gradient signal to a saliency map. Default:
            :func:`gradient_to_saliency`.
        get_backward_gradient (function, optional): function that generates
            gradient tensor to backpropagate. Default:
            :func:`get_backward_gradient`.
        debug (bool, optional): if True, also return an
            :class:`collections.OrderedDict` of :class:`Probe` objects for
            all modules in the model. Default: ``False``.
    Returns:
        :class:`torch.Tensor` or tuple: If :attr:`debug` is False, returns a
            :class:`torch.Tensor` saliency map at :attr:`saliency_layer`.
            Otherwise, returns a tuple of a :class:`torch.Tensor` saliency map
            at :attr:`saliency_layer` and an :class:`collections.OrderedDict`
            of :class:`Probe` objects for all modules in the model.
    """
    # Clear any existing gradient.
    if input.grad is not None:
        input.grad.data.zero_()
    # Disable gradients for model parameters, remembering their original
    # requires_grad flags so they can be restored at the end.
    orig_requires_grad = {}
    for name, param in model.named_parameters():
        orig_requires_grad[name] = param.requires_grad
        param.requires_grad_(False)
    # Set model to eval mode (restored before returning).
    if model.training:
        orig_is_training = True
        model.eval()
    else:
        orig_is_training = False
    # Attach debug probes to every module.
    # debug_probes = attach_debug_probes(model, debug=debug)
    # Attach a probe to the saliency layer. For the default '' layer the
    # probe captures the model's *input* tensor instead of a layer output.
    probe_target = 'input' if saliency_layer == '' else 'output'
    saliency_layer = get_module(model, saliency_layer)
    assert saliency_layer is not None, 'We could not find the saliency layer'
    # NOTE(review): `Probe` is defined elsewhere in this file; presumably it
    # records the hooked tensor with gradients enabled — confirm its contract.
    probe = Probe(saliency_layer, target=probe_target)
    # Do a forward and backward pass under the method-specific context.
    with context_builder():
        output = model(input)
        backward_gradient = get_backward_gradient(output, target)
        output.backward(backward_gradient)
    # Get saliency map from the pseudo-gradient captured by the probe.
    saliency_map = gradient_to_saliency(probe.data[0])
    # Resize saliency map (no-op when resize is False).
    saliency_map = resize_saliency(input,
                                   saliency_map,
                                   resize,
                                   mode=resize_mode)
    # Smooth saliency map; sigma scales with the larger spatial dimension.
    if smooth > 0:
        saliency_map = imsmooth(
            saliency_map,
            sigma=smooth * max(saliency_map.shape[2:]),
            padding_mode='replicate'
        )
    # Remove probe.
    probe.remove()
    # Restore gradient saving for model parameters.
    for name, param in model.named_parameters():
        param.requires_grad_(orig_requires_grad[name])
    # Restore model's original mode.
    if orig_is_training:
        model.train()
    # if debug:
    #     return saliency_map, debug_probes
    # else:
    #     return saliency_map
    return saliency_map
| 21,113 | 33.726974 | 79 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/common.py | import os
import torch
import random
import numpy as np
import argparse
def setup_config(args, cfg):
    """Override config entries with explicitly provided CLI arguments.

    Args:
        args: parsed command-line arguments (an ``argparse.Namespace`` with
            ``lr``, ``bs``, ``max_epoch`` and ``resume`` attributes, each
            defaulting to None when not given on the command line).
        cfg: config object; its fields are updated in place.

    Returns:
        The (mutated) ``cfg`` object, for call-chaining convenience.
    """
    # cfg.DATA_ROOT = args.data_root_dir if args.data_root_dir else cfg.DATA_ROOT
    # Compare against None explicitly: the previous truthiness test
    # (`args.lr if args.lr else ...`) silently ignored legitimate falsy
    # values such as lr == 0 or bs == 0.
    if args.lr is not None:
        cfg.LR_INIT = args.lr
    if args.bs is not None:
        cfg.TRAIN_BATCH_SIZE = args.bs
    if args.max_epoch is not None:
        cfg.MAX_EPOCH = args.max_epoch
    if args.resume is not None:
        cfg.RESUME = args.resume
    return cfg
def setup_seed(seed=12345):
    """Seed every RNG (numpy, random, torch CPU and CUDA) for reproducibility.

    Args:
        seed (int): seed value applied to all generators. Default: ``12345``.
    """
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)  # cpu
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        # benchmark must be False for deterministic runs: with benchmark=True
        # cuDNN auto-tunes and may pick non-deterministic algorithms, which
        # defeats the `deterministic = True` flag set just above.
        torch.backends.cudnn.benchmark = False
def parse_args():
    """
    Parse input arguments
    """
    arg_parser = argparse.ArgumentParser(description='deep learning on personality')
    arg_parser.add_argument('-c', '--cfg_file', default=None, type=str,
                            help="experiment config file")
    arg_parser.add_argument('--weight', dest='weight', type=str,
                            help='initialize with pretrained model weights')
    arg_parser.add_argument("--test_only", action="store_true",
                            help="only test model on specified weights")
    arg_parser.add_argument('--lr', type=float, default=None,
                            help='learning rate')
    arg_parser.add_argument('--bs', default=None,
                            help='training batch size')
    arg_parser.add_argument("--resume", default=None,
                            help="saved model path to last training epoch")
    arg_parser.add_argument("-m", '--max_epoch', type=int, default=None)
    arg_parser.add_argument('--set', dest='set_cfgs', default=None,
                            nargs=argparse.REMAINDER,
                            help='set config keys')
    return arg_parser.parse_args()
def get_device(gpu=0):
    """Return ``cuda:<gpu>`` when CUDA is usable and a gpu index was given,
    otherwise fall back to the CPU device."""
    use_cuda = torch.cuda.is_available() and gpu is not None
    return torch.device(f'cuda:{gpu}') if use_cuda else torch.device('cpu')
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Stochastic Depth per sample.
    """
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample; singleton trailing dims broadcast over
    # whatever tensor rank we get (works for 2D and ConvNet tensors alike).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    # Scale surviving paths by 1/keep_prob so expectations match at eval time.
    return x.div(keep_prob) * random_tensor
| 2,633 | 24.085714 | 81 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/cam.py | """
# Copyright (C) 2020-2021, François-Guillaume Fernandez.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
# code modified from https://github.com/frgfm/torch-cam/tree/master/torchcam/cams
"""
from typing import Optional, List, Tuple
from dpcv.tools.utils import locate_candidate_layer
import math
import logging
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
from typing import Optional, Tuple, Any
from dpcv.tools.utils import locate_linear_layer
__all__ = ['CAM', 'ScoreCAM', 'SSCAM', 'ISCAM']
class _CAM:
    """Implements a class activation map extractor
    Args:
        model: input model
        target_layer: name of the target layer
        input_shape: shape of the expected input tensor excluding the batch dimension
        enable_hooks: should hooks be enabled by default
        conv1d: if True, reduce the weighted activations over dim 1 instead
            of dim 0 when combining them into a CAM (for 1-D conv features)
    """
    def __init__(
        self,
        model: nn.Module,
        target_layer: Optional[str] = None,
        input_shape: Tuple[int, ...] = (3, 224, 224),
        enable_hooks: bool = True,
        conv1d: bool = False,
    ) -> None:
        # Obtain a mapping from module name to module instance for each layer in the model
        self.submodule_dict = dict(model.named_modules())
        # If the layer is not specified, try automatic resolution
        if target_layer is None:
            target_layer = locate_candidate_layer(model, input_shape)
            # Warn the user of the choice
            if isinstance(target_layer, str):
                logging.warning(f"no value was provided for `target_layer`, thus set to '{target_layer}'.")
            else:
                raise ValueError("unable to resolve `target_layer` automatically, please specify its value.")
        if target_layer not in self.submodule_dict.keys():
            raise ValueError(f"Unable to find submodule {target_layer} in the model")
        self.target_layer = target_layer
        self.model = model
        # Init hooks
        self.hook_a: Optional[Tensor] = None
        self.hook_handles: List[torch.utils.hooks.RemovableHandle] = []
        # Forward hook: captures the target layer's output on every forward pass
        self.hook_handles.append(self.submodule_dict[target_layer].register_forward_hook(self._hook_a))
        # Enable hooks
        self._hooks_enabled = enable_hooks
        # Should ReLU be used before normalization
        self._relu = False
        # Model output is used by the extractor
        self._score_used = False
        self.conv1d = conv1d
    def _hook_a(self, module: nn.Module, input: Tensor, output: Tensor) -> None:
        """Activation hook"""
        if self._hooks_enabled:
            self.hook_a = output.data
    def clear_hooks(self) -> None:
        """Clear model hooks"""
        for handle in self.hook_handles:
            handle.remove()
        self.hook_handles.clear()
    @staticmethod
    def _normalize(cams: Tensor, spatial_dims: Optional[int] = None) -> Tensor:
        """CAM normalization: min-max rescale (in place) over the trailing
        `spatial_dims` dimensions, so each map spans [0, 1]."""
        spatial_dims = cams.ndim if spatial_dims is None else spatial_dims
        cams.sub_(cams.flatten(start_dim=-spatial_dims).min(-1).values[(...,) + (None,) * spatial_dims])
        cams.div_(cams.flatten(start_dim=-spatial_dims).max(-1).values[(...,) + (None,) * spatial_dims])
        return cams
    def _get_weights(self, class_idx: int, scores: Optional[Tensor] = None) -> Tensor:
        # Subclasses provide the per-channel weighting scheme.
        raise NotImplementedError
    def _precheck(self, class_idx: int, scores: Optional[Tensor] = None) -> None:
        """Check for invalid computation cases"""
        # Check that forward has already occurred
        if not isinstance(self.hook_a, Tensor):
            raise AssertionError("Inputs need to be forwarded in the model for the conv features to be hooked")
        # Check batch size
        if self.hook_a.shape[0] != 1:
            raise ValueError(f"expected a 1-sized batch to be hooked. Received: {self.hook_a.shape[0]}")
        # Check class_idx value
        if not isinstance(class_idx, int) or class_idx < 0:
            raise ValueError("Incorrect `class_idx` argument value")
        # Check scores arg
        if self._score_used and not isinstance(scores, torch.Tensor):
            raise ValueError("model output scores is required to be passed to compute CAMs")
    def __call__(self, class_idx: int, scores: Optional[Tensor] = None, normalized: bool = True) -> Tensor:
        # Integrity check
        self._precheck(class_idx, scores)
        # Compute CAM
        return self.compute_cams(class_idx, scores, normalized)
    def compute_cams(self, class_idx: int, scores: Optional[Tensor] = None, normalized: bool = True) -> Tensor:
        """Compute the CAM for a specific output class
        Args:
            class_idx (int): output class index of the target class whose CAM will be computed
            scores (torch.Tensor[1, K], optional): forward output scores of the hooked model
            normalized (bool, optional): whether the CAM should be normalized
        Returns:
            torch.Tensor[M, N]: class activation map of hooked conv layer
        """
        # Get map weight & unsqueeze it
        weights = self._get_weights(class_idx, scores)
        # Pad the weight tensor with singleton dims so it broadcasts over
        # the hooked activation's spatial dimensions.
        missing_dims = self.hook_a.ndim - weights.ndim - 1  # type: ignore[union-attr]
        weights = weights[(...,) + (None,) * missing_dims]
        # Perform the weighted combination to get the CAM
        if self.conv1d:
            batch_cams = torch.nansum(weights * self.hook_a.squeeze(0), dim=1)  # type: ignore[union-attr]
        else:
            batch_cams = torch.nansum(weights * self.hook_a.squeeze(0), dim=0)  # type: ignore[union-attr]
        if self._relu:
            batch_cams = F.relu(batch_cams, inplace=True)
        # Normalize the CAM
        if normalized:
            batch_cams = self._normalize(batch_cams)
        return batch_cams
    def extra_repr(self) -> str:
        return f"target_layer='{self.target_layer}'"
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.extra_repr()})"
class CAM(_CAM):
    """Implements a class activation map extractor as described in `"Learning Deep Features for Discriminative
    Localization" <https://arxiv.org/pdf/1512.04150.pdf>`_.
    Args:
        model: input model
        target_layer: name of the target layer
        fc_layer: name of the fully convolutional layer
        input_shape: shape of the expected input tensor excluding the batch dimension
    """
    def __init__(
        self,
        model: nn.Module,
        target_layer: Optional[str] = None,
        fc_layer: Optional[str] = None,
        input_shape: Tuple[int, ...] = (3, 224, 224),
        **kwargs: Any,
    ) -> None:
        super().__init__(model, target_layer, input_shape, **kwargs)
        # If the layer is not specified, try automatic resolution
        if fc_layer is None:
            fc_layer = locate_linear_layer(model)
            # Warn the user of the choice
            if isinstance(fc_layer, str):
                logging.warning(f"no value was provided for `fc_layer`, thus set to '{fc_layer}'.")
            else:
                raise ValueError("unable to resolve `fc_layer` automatically, please specify its value.")
        # Softmax weight: CAM weights channels by the classifier's FC weights
        self._fc_weights = self.submodule_dict[fc_layer].weight.data
        # squeeze to accomodate replacement by Conv1x1 (weight would be 4-D)
        if self._fc_weights.ndim > 2:
            self._fc_weights = self._fc_weights.view(*self._fc_weights.shape[:2])
    def _get_weights(self, class_idx: int, scores: Optional[Tensor] = None) -> Tensor:
        """Computes the weight coefficients of the hooked activation maps"""
        # Take the FC weights of the target class
        return self._fc_weights[class_idx, :]
class ScoreCAM(_CAM):
    """Implements a class activation map extractor as described in `"Score-CAM:
    Score-Weighted Visual Explanations for Convolutional Neural Networks" <https://arxiv.org/pdf/1910.01279.pdf>`_.
    Args:
        model: input model
        target_layer: name of the target layer
        batch_size: batch size used to forward masked inputs
        input_shape: shape of the expected input tensor excluding the batch dimension
    """
    def __init__(
        self,
        model: nn.Module,
        target_layer: Optional[str] = None,
        batch_size: int = 32,
        input_shape: Tuple[int, ...] = (3, 224, 224),
        **kwargs: Any,
    ) -> None:
        super().__init__(model, target_layer, input_shape, **kwargs)
        # Input hook: Score-CAM needs the raw model input to build masked copies
        self.hook_handles.append(model.register_forward_pre_hook(self._store_input))
        self.bs = batch_size
        # Ensure ReLU is applied to CAM before normalization
        self._relu = True
    def _store_input(self, module: nn.Module, input: Tensor) -> None:
        """Store model input tensor"""
        if self._hooks_enabled:
            self._input = input[0].data.clone()
    def _get_weights(self, class_idx: int, scores: Optional[Tensor] = None) -> Tensor:
        """Computes the weight coefficients of the hooked activation maps"""
        # Normalize the activation
        self.hook_a: Tensor
        upsampled_a = self._normalize(self.hook_a, self.hook_a.ndim - 2)
        # Upsample it to input_size
        # 1 * O * M * N
        spatial_dims = self._input.ndim - 2
        interpolation_mode = 'bilinear' if spatial_dims == 2 else 'trilinear' if spatial_dims == 3 else 'nearest'
        upsampled_a = F.interpolate(upsampled_a, self._input.shape[2:], mode=interpolation_mode, align_corners=False)
        # Use it as a mask
        # O * I * H * W
        masked_input = upsampled_a.squeeze(0).unsqueeze(1) * self._input
        # Initialize weights
        weights = torch.zeros(masked_input.shape[0], dtype=masked_input.dtype).to(device=masked_input.device)
        # Disable hook updates so the extra forwards below don't clobber hook_a
        self._hooks_enabled = False
        # Switch to eval
        origin_mode = self.model.training
        self.model.eval()
        # Process by chunk (GPU RAM limitation)
        for idx in range(math.ceil(weights.shape[0] / self.bs)):
            selection_slice = slice(idx * self.bs, min((idx + 1) * self.bs, weights.shape[0]))
            with torch.no_grad():
                # Get the softmax probabilities of the target class
                weights[selection_slice] = F.softmax(self.model(masked_input[selection_slice]), dim=1)[:, class_idx]
        # Reenable hook updates
        self._hooks_enabled = True
        # Put back the model in the correct mode
        self.model.training = origin_mode
        return weights
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(batch_size={self.bs})"
class SSCAM(ScoreCAM):
    """Implements a class activation map extractor as described in `"SS-CAM: Smoothed Score-CAM for
    Sharper Visual Feature Localization" <https://arxiv.org/pdf/2006.14255.pdf>`_.
    Args:
        model: input model
        target_layer: name of the target layer
        batch_size: batch size used to forward masked inputs
        num_samples: number of noisy samples used for weight computation
        std: standard deviation of the noise added to the normalized activation
        input_shape: shape of the expected input tensor excluding the batch dimension
    """
    def __init__(
        self,
        model: nn.Module,
        target_layer: Optional[str] = None,
        batch_size: int = 32,
        num_samples: int = 35,
        std: float = 2.0,
        input_shape: Tuple[int, ...] = (3, 224, 224),
        **kwargs: Any,
    ) -> None:
        super().__init__(model, target_layer, batch_size, input_shape, **kwargs)
        self.num_samples = num_samples
        self.std = std
        # Zero-mean Gaussian used to perturb the mask on every sample
        self._distrib = torch.distributions.normal.Normal(0, self.std)
    def _get_weights(self, class_idx: int, scores: Optional[Tensor] = None) -> Tensor:
        """Computes the weight coefficients of the hooked activation maps"""
        # Normalize the activation
        self.hook_a: Tensor
        upsampled_a = self._normalize(self.hook_a, self.hook_a.ndim - 2)
        # Upsample it to input_size
        # 1 * O * M * N
        spatial_dims = self._input.ndim - 2
        interpolation_mode = 'bilinear' if spatial_dims == 2 else 'trilinear' if spatial_dims == 3 else 'nearest'
        upsampled_a = F.interpolate(upsampled_a, self._input.shape[2:], mode=interpolation_mode, align_corners=False)
        # Use it as a mask
        # O * I * H * W
        upsampled_a = upsampled_a.squeeze(0).unsqueeze(1)
        # Initialize weights
        weights = torch.zeros(upsampled_a.shape[0], dtype=upsampled_a.dtype).to(device=upsampled_a.device)
        # Disable hook updates so the extra forwards below don't clobber hook_a
        self._hooks_enabled = False
        # Switch to eval
        origin_mode = self.model.training
        self.model.eval()
        for _idx in range(self.num_samples):
            # Perturb the mask with Gaussian noise, then average the class
            # scores over all noisy samples (the "smoothed" part of SS-CAM).
            noisy_m = self._input * (upsampled_a +
                                     self._distrib.sample(self._input.size()).to(device=self._input.device))
            # Process by chunk (GPU RAM limitation)
            for idx in range(math.ceil(weights.shape[0] / self.bs)):
                selection_slice = slice(idx * self.bs, min((idx + 1) * self.bs, weights.shape[0]))
                with torch.no_grad():
                    # Get the softmax probabilities of the target class
                    weights[selection_slice] += F.softmax(self.model(noisy_m[selection_slice]), dim=1)[:, class_idx]
        weights.div_(self.num_samples)
        # Reenable hook updates
        self._hooks_enabled = True
        # Put back the model in the correct mode
        self.model.training = origin_mode
        return weights
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(batch_size={self.bs}, num_samples={self.num_samples}, std={self.std})"
class ISCAM(ScoreCAM):
    """Implements a class activation map extractor as described in `"IS-CAM: Integrated Score-CAM for axiomatic-based
    explanations" <https://arxiv.org/pdf/2010.03023.pdf>`_.
    Args:
        model: input model
        target_layer: name of the target layer
        batch_size: batch size used to forward masked inputs
        num_samples: number of noisy samples used for weight computation
        input_shape: shape of the expected input tensor excluding the batch dimension
    """
    def __init__(
        self,
        model: nn.Module,
        target_layer: Optional[str] = None,
        batch_size: int = 32,
        num_samples: int = 10,
        input_shape: Tuple[int, ...] = (3, 224, 224),
        **kwargs: Any,
    ) -> None:
        super().__init__(model, target_layer, batch_size, input_shape, **kwargs)
        self.num_samples = num_samples
    def _get_weights(self, class_idx: int, scores: Optional[Tensor] = None) -> Tensor:
        """Computes the weight coefficients of the hooked activation maps"""
        # Normalize the activation
        self.hook_a: Tensor
        upsampled_a = self._normalize(self.hook_a, self.hook_a.ndim - 2)
        # Upsample it to input_size
        # 1 * O * M * N
        spatial_dims = self._input.ndim - 2
        interpolation_mode = 'bilinear' if spatial_dims == 2 else 'trilinear' if spatial_dims == 3 else 'nearest'
        upsampled_a = F.interpolate(upsampled_a, self._input.shape[2:], mode=interpolation_mode, align_corners=False)
        # Use it as a mask
        # O * I * H * W
        upsampled_a = upsampled_a.squeeze(0).unsqueeze(1)
        # Initialize weights
        weights = torch.zeros(upsampled_a.shape[0], dtype=upsampled_a.dtype).to(device=upsampled_a.device)
        # Disable hook updates so the extra forwards below don't clobber hook_a
        self._hooks_enabled = False
        fmap = torch.zeros((upsampled_a.shape[0], *self._input.shape[1:]),
                           dtype=upsampled_a.dtype, device=upsampled_a.device)
        # Switch to eval
        origin_mode = self.model.training
        self.model.eval()
        for _idx in range(self.num_samples):
            # Integrate masked inputs at increasing intensity fractions
            # (the "integrated" part of IS-CAM); fmap accumulates across steps.
            fmap += (_idx + 1) / self.num_samples * self._input * upsampled_a
            # Process by chunk (GPU RAM limitation)
            for idx in range(math.ceil(weights.shape[0] / self.bs)):
                selection_slice = slice(idx * self.bs, min((idx + 1) * self.bs, weights.shape[0]))
                with torch.no_grad():
                    # Get the softmax probabilities of the target class
                    weights[selection_slice] += F.softmax(self.model(fmap[selection_slice]), dim=1)[:, class_idx]
        # Reenable hook updates
        self._hooks_enabled = True
        # Put back the model in the correct mode
        self.model.training = origin_mode
        return weights
| 16,934 | 38.567757 | 117 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/draw.py | import math
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import torch
def show_confMat(confusion_mat, classes, set_name, out_dir, epoch=999, verbose=False, perc=False):
    """Render a confusion matrix to a PNG and optionally print per-class stats.

    Args:
        confusion_mat (np.ndarray): square matrix, rows = true labels,
            columns = predicted labels.
        classes (sequence): class names, one per matrix row/column.
        set_name (str): split tag used in the figure title and file name.
        out_dir (str): directory where the PNG is written.
        epoch (int): epoch number shown in the title. Default: 999.
        verbose (bool): if True, print per-class recall/precision.
        perc (bool): if True, annotate cells as column-wise percentages
            instead of raw counts.
    """
    cls_num = len(classes)
    confusion_mat_tmp = confusion_mat.copy()
    # Row-normalize for display; NOTE(review): a class with zero true samples
    # yields a 0/0 division here — confirm inputs always have every class.
    for i in range(len(classes)):
        confusion_mat_tmp[i, :] = confusion_mat[i, :] / confusion_mat[i, :].sum()
    # Scale figure with class count so labels stay legible.
    if cls_num < 10:
        figsize = 6
    elif cls_num >= 100:
        figsize = 30
    else:
        figsize = 35  # np.linspace(6, 30, 91)[cls_num-10]
    plt.figure(figsize=(int(figsize), int(figsize*1.3)))
    cmap = plt.cm.get_cmap('Greys')
    plt.imshow(confusion_mat_tmp, cmap=cmap)
    plt.colorbar(fraction=0.03)
    xlocations = np.array(range(len(classes)))
    plt.xticks(xlocations, list(classes), rotation=60)
    plt.yticks(xlocations, list(classes))
    plt.xlabel('Predict label')
    plt.ylabel('True label')
    plt.title("Confusion_Matrix_{}_{}".format(set_name, epoch))
    if perc:
        # Column sums = number of predictions per class; NOTE(review): a class
        # never predicted gives a division by zero here — confirm acceptable.
        cls_per_nums = confusion_mat.sum(axis=0)
        conf_mat_per = confusion_mat / cls_per_nums
        for i in range(confusion_mat_tmp.shape[0]):
            for j in range(confusion_mat_tmp.shape[1]):
                plt.text(x=j, y=i, s="{:.0%}".format(conf_mat_per[i, j]), va='center', ha='center', color='red',
                         fontsize=10)
    else:
        for i in range(confusion_mat_tmp.shape[0]):
            for j in range(confusion_mat_tmp.shape[1]):
                plt.text(x=j, y=i, s=int(confusion_mat[i, j]), va='center', ha='center', color='red', fontsize=10)
    plt.savefig(os.path.join(out_dir, "Confusion_Matrix_{}.png".format(set_name)))
    plt.close()
    if verbose:
        # Recall = diag / row sum, Precision = diag / column sum;
        # 1e-9 guards the printed ratios against empty rows/columns.
        for i in range(cls_num):
            print('class:{:<10}, total num:{:<6}, correct num:{:<5} Recall: {:.2%} Precision: {:.2%}'.format(
                classes[i], np.sum(confusion_mat[i, :]), confusion_mat[i, i],
                confusion_mat[i, i] / (1e-9 + np.sum(confusion_mat[i, :])),
                confusion_mat[i, i] / (1e-9 + np.sum(confusion_mat[:, i]))))
def plot_line(train_x, train_y, valid_x, valid_y, mode, out_dir):
    """Plot train/valid curves over epochs and save them as ``<mode>.png``."""
    plt.plot(train_x, train_y, label='Train')
    plt.plot(valid_x, valid_y, label='Valid')
    plt.ylabel(str(mode))
    plt.xlabel('Epoch')
    # Loss curves decrease, so the legend goes top-right; metrics that grow
    # (e.g. accuracy) get it top-left.
    plt.legend(loc='upper right' if mode == 'loss' else 'upper left')
    plt.title('_'.join([mode]))
    plt.savefig(os.path.join(out_dir, mode + '.png'))
    plt.close()
def pil_to_tensor(pil_image):
    r"""Convert a PIL image to a tensor.
    Args:
        pil_image (:class:`PIL.Image`): PIL image.
    Returns:
        :class:`torch.Tensor`: the image as a :math:`3\times H\times W` tensor
        in the [0, 1] range.
    """
    arr = np.array(pil_image)
    if arr.ndim == 2:
        # Grayscale: give it an explicit singleton channel axis.
        arr = arr[:, :, None]
    # HWC uint8 -> CHW float in [0, 1]
    tensor = torch.tensor(arr, dtype=torch.float32)
    return tensor.permute(2, 0, 1) / 255
def imsc(img, *args, quiet=False, lim=None, interpolation='lanczos', **kwargs):
    r"""Rescale an image tensor to [0, 1] and optionally display it.
    The img is assumed to have shape :math:`3\times H\times W` (RGB) or
    :math:`1\times H\times W` (grayscale).
    Args:
        img (:class:`torch.Tensor` or :class:`PIL.Image`): image.
        quiet (bool, optional): if False, do not display image.
            Default: ``False``.
        lim (list, optional): maximum and minimum intensity value for
            rescaling. Default: ``None``.
        interpolation (str, optional): The interpolation mode to use with
            :func:`matplotlib.pyplot.imshow` (e.g. ``'lanczos'`` or
            ``'nearest'``). Default: ``'lanczos'``.
    Returns:
        :class:`torch.Tensor`: Rescaled image img.
    """
    if isinstance(img, Image.Image):
        img = pil_to_tensor(img)
    handle = None
    with torch.no_grad():
        if not lim:
            lim = [img.min(), img.max()]
        lo, hi = lim[0], lim[1]
        # Subtraction copies the input; the scaling is then done in place.
        img = (img - lo).mul_(1 / (hi - lo))
        img = torch.clamp(img, min=0, max=1)
        if not quiet:
            # Expand grayscale to 3 channels and move to HWC for imshow.
            bitmap = img.expand(3, *img.shape[1:]).permute(1, 2, 0).cpu().numpy()
            handle = plt.imshow(bitmap, *args, interpolation=interpolation, **kwargs)
            plt.gca().axis('off')
    return img, handle
def plot_example(input,
                 saliency,
                 method,
                 category_id,
                 show_plot=True,
                 save_path=None):
    """Plot an example.
    Args:
        input (:class:`torch.Tensor`): 4D tensor containing input images.
        saliency (:class:`torch.Tensor`): 4D tensor containing saliency maps.
        method (str): name of saliency method.
        category_id (int): ID of ImageNet category.
        show_plot (bool, optional): If True, show plot. Default: ``False``.
        save_path (str, optional): Path to save figure to. Default: ``None``.
    """
    # from utils import imsc
    if isinstance(category_id, int):
        category_id = [category_id]
    batch_size = len(input)
    plt.clf()
    # One row per batch element: input image on the left, saliency on the right.
    for i in range(batch_size):
        class_i = category_id[i % len(category_id)]
        plt.subplot(batch_size, 2, 1 + 2 * i)
        imsc(input[i])
        plt.title('input image', fontsize=8)
        plt.subplot(batch_size, 2, 2 + 2 * i)
        imsc(saliency[i], interpolation='none')
        plt.title(f'{method}', fontsize=8)
    # Save figure if path is specified.
    if save_path:
        save_dir = os.path.dirname(os.path.abspath(save_path))
        # Create directory if necessary.
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # File format is inferred from the save_path extension.
        ext = os.path.splitext(save_path)[1].strip('.')
        plt.savefig(save_path, format=ext, bbox_inches='tight')
    # Show plot if desired.
    if show_plot:
        plt.show()
| 5,948 | 33.189655 | 114 | py |
DeepPersonality | DeepPersonality-main/dpcv/tools/cam_vis.py | """
code modified from https://github.com/frgfm/torch-cam/tree/master/torchcam
"""
import torch
from matplotlib import cm
import numpy as np
from PIL import Image
def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.
    See :class:`~torchvision.transforms.ToPILImage` for more details.
    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
    Returns:
        PIL Image: Image converted to PIL Image.
    Raises:
        TypeError: if ``pic`` is neither a Tensor nor an ndarray, or the
            dtype is unsupported.
        ValueError: if ``pic`` is not 2/3-dimensional, or ``mode`` does not
            match the inferred channel layout.
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)
    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)
    npimg = pic
    if isinstance(pic, torch.Tensor):
        # Float tensors (except explicit 'F' mode) are scaled to uint8 [0, 255].
        if pic.is_floating_point() and mode != 'F':
            pic = pic.mul(255).byte()
        # CHW tensor -> HWC ndarray for PIL.
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))
    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))
    # Infer the expected PIL mode from the channel count and dtype, and
    # validate any user-supplied mode against it.
    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            # Bug fix: report the actual array dtype (npimg.dtype); the
            # previous code formatted `np.dtype` (the class itself).
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode
    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'
    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'
    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))
    return Image.fromarray(npimg, mode=mode)
def overlay_mask(img: Image.Image, mask: Image.Image, colormap: str = 'jet', alpha: float = 0.7) -> Image.Image:
    """Overlay a colormapped mask on a background image
    Args:
        img: background image
        mask: mask to be overlayed in grayscale
        colormap: colormap to be applied on the mask
        alpha: transparency of the background image
    Returns:
        overlayed image
    """
    if not (isinstance(img, Image.Image) and isinstance(mask, Image.Image)):
        raise TypeError('img and mask arguments need to be PIL.Image')
    if not isinstance(alpha, float) or alpha < 0 or alpha >= 1:
        raise ValueError('alpha argument is expected to be of type float between 0 and 1')
    cmap = cm.get_cmap(colormap)
    # Resize mask to the image size, square it to sharpen the highlight,
    # colormap it, and drop the alpha channel.
    resized = mask.resize(img.size, resample=Image.BICUBIC)
    colored = (255 * cmap(np.asarray(resized) ** 2)[:, :, :3]).astype(np.uint8)
    # Alpha-blend the background with the colored mask.
    blended = alpha * np.asarray(img) + (1 - alpha) * colored
    return Image.fromarray(blended.astype(np.uint8))
def overlay_audio_mask(audio, mask):
    """Scale a 1-D audio tensor by a saliency mask stretched to its length."""
    # Flatten the mask to one row and wrap it in a float ('F') PIL image so
    # it can be resampled to the audio length.
    mask_img = Image.fromarray(mask.view(1, -1).numpy(), mode="F")
    resized = mask_img.resize((1, audio.shape[-1]))
    # PIL sizes are (width, height) -> array is (len, 1); transpose to (1, len).
    mask_arr = np.array(resized).T
    return audio.numpy() * mask_arr
| 4,942 | 35.88806 | 112 | py |
DeepPersonality | DeepPersonality-main/dpcv/checkpoint/save.py | import os
import torch
def save_model(epoch, best_acc, model, optimizer, output_dir, cfg):
    """Serialize model/optimizer state together with training progress.

    Args:
        epoch: index of the epoch just finished
        best_acc: best validation accuracy observed so far
        model: network whose ``state_dict`` is checkpointed
        optimizer: optimizer, or a ``[cls_opt, reg_opt]`` pair (CRNet), in
            which case only the second optimizer's state is saved
        output_dir: directory the checkpoint file is written into
        cfg: config providing ``MAX_EPOCH`` used to detect the final epoch
    """
    if isinstance(optimizer, list):
        # CRNet trains with two optimizers; only the regressor's is kept.
        optimizer = optimizer[1]
    is_last = (epoch == cfg.MAX_EPOCH - 1)
    file_name = "checkpoint_last.pkl" if is_last else "checkpoint_{}.pkl".format(epoch)
    state = {
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "epoch": epoch,
        "best_acc": best_acc,
    }
    torch.save(state, os.path.join(output_dir, file_name))
def resume_training(checkpoint_path, model, optimizer):
    """Restore model/optimizer weights and return the epoch to resume from.

    Args:
        checkpoint_path: path of a checkpoint written by :func:`save_model`
        model: network to load weights into
        optimizer: optimizer to load state into

    Returns:
        tuple of ``(model, optimizer, epoch)`` with the stored epoch index
    """
    state = torch.load(checkpoint_path)
    model.load_state_dict(state["model_state_dict"])
    optimizer.load_state_dict(state["optimizer_state_dict"])
    return model, optimizer, state["epoch"]
def load_model(model, checkpoint_path):
    """Load only the network weights from a checkpoint (no optimizer state).

    Args:
        model: network to load weights into
        checkpoint_path: path of a checkpoint containing "model_state_dict"

    Returns:
        the same model instance with restored weights
    """
    weights = torch.load(checkpoint_path)["model_state_dict"]
    model.load_state_dict(weights)
    return model
| 1,025 | 30.090909 | 107 | py |
DeepPersonality | DeepPersonality-main/dpcv/checkpoint/load.py | import re
from collections import OrderedDict
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.

    Raises:
        RuntimeError: if ``strict`` is True and the keys do not match.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')

    # Bug fix: the collected mismatch messages were previously discarded, so
    # neither `strict` nor `logger` had any effect despite the docstring.
    # Surface them: raise when strict, otherwise warn via logger or print.
    if err_msg:
        err_msg.insert(0, 'The model and loaded state dict do not match exactly\n')
        message = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(message)
        elif logger is not None:
            logger.warning(message)
        else:
            print(message)
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # Maps a path prefix (e.g. "http://", "torchvision://") to the loader
    # function registered for it; re-sorted on every registration so that
    # longer prefixes are matched first.
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        # Internal helper: register `loader` under one or several prefixes.
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if (prefix not in cls._schemes) or force:
                cls._schemes[prefix] = loader
            else:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """
        if loader is not None:
            # Normal class-method call: register immediately.
            cls._register_scheme(prefixes, loader, force=force)
            return

        # Decorator usage: return a wrapper that registers the decorated
        # function/class and hands it back unchanged.
        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the local
        loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            loader (function): checkpoint loader
        """
        # NOTE(review): this implicitly returns None when no registered prefix
        # matches, despite the documented local-loader fallback — presumably a
        # local loader is registered elsewhere under a prefix such as '';
        # confirm, otherwise load_checkpoint() below crashes on None.
        for p in cls._schemes:
            if path.startswith(p):
                return cls._schemes[p]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """
        checkpoint_loader = cls._get_checkpoint_loader(filename)
        # class_name = checkpoint_loader.__name__
        return checkpoint_loader(filename, map_location)
def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Thin convenience wrapper around :meth:`CheckpointLoader.load_checkpoint`.

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error message.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    return CheckpointLoader.load_checkpoint(
        filename, map_location=map_location, logger=logger)
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: if the loaded checkpoint is not a dict.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        # Bug fix: the message was a placeholder-less f-string reading
        # "(unknown)"; report the offending file instead.
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # strip prefix of state_dict (e.g. "module." left over from DataParallel)
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata
    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
| 7,798 | 35.787736 | 79 | py |
DeepPersonality | DeepPersonality-main/dpcv/experiment/exp_runner.py | import os
import json
import numpy as np
import torch
from datetime import datetime
from dpcv.data.datasets.build import build_dataloader
from dpcv.modeling.networks.build import build_model
from dpcv.modeling.loss.build import build_loss_func
from dpcv.modeling.solver.build import build_solver, build_scheduler
from dpcv.engine.build import build_trainer
from dpcv.evaluation.summary import TrainSummary
from dpcv.checkpoint.save import save_model, resume_training, load_model
from dpcv.evaluation.metrics import compute_pcc, compute_ccc
from dpcv.tools.logger import make_logger
class ExpRunner:
    """Drive one experiment end to end from a config object.

    Builds, in order: dataloaders, model, loss function, optimizer, LR
    scheduler, metric collector and trainer, then exposes ``train`` /
    ``test`` / ``run`` entry points.
    """

    def __init__(self, cfg, feature_extract=None):
        """Construct an experiment from its config.

        Args:
            cfg: experiment config; expects TRAIN/TEST/SOLVER sections and
                registry keys consumed by the build_* helpers below.
            feature_extract: when truthy, skip dataloader construction
                (used for feature-extraction-only runs).
        """
        self.cfg = cfg
        self.logger, self.log_dir = make_logger(cfg.TRAIN.OUTPUT_DIR)
        self.log_cfg_info()
        if not feature_extract:
            self.data_loader = self.build_dataloader()
        self.model = self.build_model()
        self.loss_f = self.build_loss_function()
        self.optimizer = self.build_solver()
        self.scheduler = self.build_scheduler()
        self.collector = TrainSummary()
        self.trainer = self.build_trainer()

    def build_dataloader(self):
        """Construct the split-keyed dataloaders from the registry."""
        return build_dataloader(self.cfg)

    def build_model(self):
        """Construct the network from the registry."""
        return build_model(self.cfg)

    def build_loss_function(self):
        """Construct the loss function from the registry."""
        return build_loss_func(self.cfg)

    def build_solver(self):
        """Construct the optimizer over the model parameters."""
        return build_solver(self.cfg, self.model)

    def build_scheduler(self):
        """Construct the LR scheduler wrapping the optimizer."""
        return build_scheduler(self.cfg, self.optimizer)

    def build_trainer(self):
        """Construct the trainer implementing the train/valid/test loops."""
        return build_trainer(self.cfg, self.collector, self.logger)

    def before_train(self, cfg):
        """Optionally resume from a checkpoint before the epoch loop starts.

        Args:
            cfg: the TRAIN section of the experiment config.
        """
        # cfg = self.cfg.TRAIN
        if cfg.RESUME:
            self.model, self.optimizer, epoch = resume_training(cfg.RESUME, self.model, self.optimizer)
            cfg.START_EPOCH = epoch
            self.logger.info(f"resume training from {cfg.RESUME}")
            # Resuming restores the old optimizer LR; RESET_LR lets the
            # config override it with a fresh initial value.
            if self.cfg.SOLVER.RESET_LR:
                self.logger.info("change learning rate form [{}] to [{}]".format(
                    self.optimizer.param_groups[0]["lr"],
                    self.cfg.SOLVER.LR_INIT,
                ))
                self.optimizer.param_groups[0]["lr"] = self.cfg.SOLVER.LR_INIT

    def train_epochs(self, cfg):
        """Run the main epoch loop with periodic validation and checkpointing.

        Args:
            cfg: the TRAIN section of the experiment config.
        """
        # cfg = self.cfg.TRAIN
        for epoch in range(cfg.START_EPOCH, cfg.MAX_EPOCH):
            self.trainer.train(self.data_loader["train"], self.model, self.loss_f, self.optimizer, epoch)
            if epoch % cfg.VALID_INTERVAL == 0:
                self.trainer.valid(self.data_loader["valid"], self.model, self.loss_f, epoch)
            self.scheduler.step()
            # Save when the collector flags an improvement (on a validation
            # epoch), and unconditionally at the final epoch.
            if self.collector.model_save and epoch % cfg.VALID_INTERVAL == 0:
                save_model(epoch, self.collector.best_valid_acc, self.model, self.optimizer, self.log_dir, cfg)
                self.collector.update_best_epoch(epoch)
            if epoch == (cfg.MAX_EPOCH - 1):
                save_model(epoch, self.collector.best_valid_acc, self.model, self.optimizer, self.log_dir, cfg)

    def after_train(self, cfg):
        """Log the final best accuracy and the epoch it was reached in."""
        # cfg = self.cfg.TRAIN
        # self.collector.draw_epo_info(log_dir=self.log_dir)
        self.logger.info(
            "{} done, best acc: {} in :{}".format(
                datetime.strftime(datetime.now(), '%m-%d_%H-%M'),
                self.collector.best_valid_acc,
                self.collector.best_epoch,
            )
        )

    def train(self):
        """Full training pipeline: resume hook, epoch loop, summary."""
        cfg = self.cfg.TRAIN
        self.before_train(cfg)
        self.train_epochs(cfg)
        self.after_train(cfg)

    def test(self, weight=None):
        """Evaluate on the test split.

        Args:
            weight: optional explicit checkpoint path; overrides cfg.WEIGHT.
                When neither is set, the newest epoch checkpoint in
                ``self.log_dir`` is used (falling back to checkpoint_last.pkl).
        """
        self.logger.info("Test only mode")
        cfg = self.cfg.TEST
        cfg.WEIGHT = weight if weight else cfg.WEIGHT
        if cfg.WEIGHT:
            self.model = load_model(self.model, cfg.WEIGHT)
        else:
            try:
                # Checkpoint files look like "checkpoint_<epoch>.pkl";
                # x[11:-4] slices out the numeric epoch for sorting.
                weights = [file for file in os.listdir(self.log_dir) if file.endswith(".pkl") and ("last" not in file)]
                weights = sorted(weights, key=lambda x: int(x[11:-4]))
                weight_file = os.path.join(self.log_dir, weights[-1])
            except IndexError:
                weight_file = os.path.join(self.log_dir, "checkpoint_last.pkl")
            self.logger.info(f"test with model {weight_file}")
            self.model = load_model(self.model, weight_file)

        if not self.cfg.TEST.FULL_TEST:
            ocean_acc_avg, ocean_acc, dataset_output, dataset_label, mse = self.trainer.test(
                self.data_loader["test"], self.model
            )
            self.logger.info("mse: {} mean: {}".format(mse[0], mse[1]))
        else:
            ocean_acc_avg, ocean_acc, dataset_output, dataset_label = self.trainer.full_test(
                self.data_loader["full_test"], self.model
            )
        self.logger.info("acc: {} mean: {}".format(ocean_acc, ocean_acc_avg))

        if cfg.COMPUTE_PCC:
            pcc_dict, pcc_mean = compute_pcc(dataset_output, dataset_label)
            self.logger.info(f"pcc: {pcc_dict} mean: {pcc_mean}")
        if cfg.COMPUTE_CCC:
            ccc_dict, ccc_mean = compute_ccc(dataset_output, dataset_label)
            self.logger.info(f"ccc: {ccc_dict} mean: {ccc_mean}")
        if cfg.SAVE_DATASET_OUTPUT:
            os.makedirs(cfg.SAVE_DATASET_OUTPUT, exist_ok=True)
            torch.save(dataset_output, os.path.join(cfg.SAVE_DATASET_OUTPUT, "pred.pkl"))
            torch.save(dataset_label, os.path.join(cfg.SAVE_DATASET_OUTPUT, "label.pkl"))
        return

    def run(self):
        """Train, then test with the best/latest checkpoint."""
        self.train()
        self.test()

    def log_cfg_info(self):
        """
        record training info for convenience of results analysis
        """
        string = json.dumps(self.cfg, sort_keys=True, indent=4, separators=(',', ':'))
        self.logger.info(string)

    def data_extract(self, dataloader, output_dir):
        """Delegate feature extraction over ``dataloader`` to the trainer."""
        return self.trainer.data_extract(self.model, dataloader, output_dir)
| 6,336 | 36.720238 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/13_tpn_on_personality.py | import torch.nn as nn
import torch.optim as optim
from dpcv.config.tpn_cfg import cfg
from dpcv.modeling.networks.TSN2D import get_tpn_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.tpn_data import make_data_loader
from dpcv.engine.bi_modal_trainer import TPNTrainer
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate the TPN model on the personality dataset."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = get_tpn_model()
    loss_f = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = TPNTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)


if __name__ == "__main__":
    main(parse_args(), cfg)
| 1,291 | 31.3 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/04_cr_audiovisual_network.py | import torch.nn as nn
import torch.optim as optim
from datetime import datetime
from dpcv.config.crnet_cfg import cfg as cr_cfg
from dpcv.engine.crnet_trainer import CRNetTrainer
from dpcv.tools.logger import make_logger
from dpcv.modeling.networks.cr_net import get_crnet_model
from dpcv.checkpoint.save import save_model, resume_training, load_model
from dpcv.data.datasets.cr_data import make_data_loader
from dpcv.tools.common import parse_args, setup_config, setup_seed
from dpcv.evaluation.summary import TrainSummary
from dpcv.modeling.loss.cr_loss import one_hot_CELoss, BellLoss
from dpcv.evaluation.metrics import compute_ccc, compute_pcc
def main(args, cfg):
    """Two-stage CRNet training: first the classification guider, then the
    regressor; or evaluation only when cfg.TEST_ONLY is set.
    """
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    train_loader = make_data_loader(cfg, mode="train")
    valid_loader = make_data_loader(cfg, mode="valid")
    test_loader = make_data_loader(cfg, mode="test")
    model = get_crnet_model(only_train_guider=True)
    loss_f = {"ce_loss": one_hot_CELoss, "bell_loss": BellLoss(), "mse_loss": nn.MSELoss(), "l1_loss": nn.L1Loss()}
    # CRNet uses SGD for the classification stage and Adam for the
    # regression stage; the scheduler only drives the Adam optimizer.
    optimizer_fir = optim.SGD(model.parameters(), lr=cfg.LR_INIT, momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY)
    optimizer_sec = optim.Adam(
        model.parameters(), betas=(cfg.BETA_1, cfg.BETA_2), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY
    )
    optimizer = [optimizer_fir, optimizer_sec]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer_sec, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = CRNetTrainer(cfg, collector, logger)

    if cfg.TEST_ONLY:
        model.train_regressor()
        model = load_model(model, cfg.WEIGHT)
        ocean_acc_avg, ocean_acc, dataset_output, dataset_label = trainer.test(test_loader, model)
        pcc_dict, pcc_mean = compute_pcc(dataset_output, dataset_label)
        ccc_dict, ccc_mean = compute_ccc(dataset_output, dataset_label)
        logger.info(f"acc: {ocean_acc} mean: {ocean_acc_avg}")
        logger.info(f"pcc: {pcc_dict} mean: {pcc_mean}")
        # Bug fix: the CCC metrics were previously logged under "acc:".
        logger.info(f"ccc: {ccc_dict} mean: {ccc_mean}")
        return

    if cfg.RESUME:
        model, optimizer[1], epoch = resume_training(cfg.RESUME, model, optimizer[1])
        cfg.START_EPOCH = epoch
        logger.info(f"resume training from {cfg.RESUME}")

    # Stage 1: train the classification guider. When resuming past
    # TRAIN_CLS_EPOCH this range is empty and the stage is skipped, which
    # makes the former in-loop `break` guard dead code (removed).
    for epoch in range(cfg.START_EPOCH, cfg.TRAIN_CLS_EPOCH):
        model.train_classifier()
        trainer.train(train_loader, model, loss_f, optimizer, epoch)

    # Stage 2: train the regressor with per-epoch validation, LR stepping
    # and best-model checkpointing.
    for epoch in range(cfg.START_EPOCH, cfg.MAX_EPOCH):
        model.train_regressor()
        # train for one epoch
        trainer.train(train_loader, model, loss_f, optimizer, epoch)
        # eval after training an epoch
        trainer.valid(valid_loader, model, loss_f, epoch)
        # update training lr every epoch
        scheduler.step()
        # save model
        if collector.model_save:
            save_model(epoch, collector.best_valid_acc, model, optimizer[1], log_dir, cfg)
            collector.update_best_epoch(epoch)

    collector.draw_epo_info(cfg.MAX_EPOCH - cfg.START_EPOCH, log_dir)
    logger.info(
        "{} done, best acc: {} in :{}".format(
            datetime.strftime(datetime.now(), '%m-%d_%H-%M'), collector.best_valid_acc, collector.best_epoch)
    )


if __name__ == "__main__":
    args = parse_args()
    main(args, cr_cfg)
| 3,449 | 40.071429 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/08_senet_on_personality.py | import torch.optim as optim
import torch.nn as nn
from dpcv.config.senet_cfg import cfg
from dpcv.modeling.module.se_resnet import se_resnet50
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_frame_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate SE-ResNet50 on the personality dataset."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = se_resnet50(5)
    loss_f = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = ImageModalTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)


if __name__ == "__main__":
    main(parse_args(), cfg)
| 1,314 | 31.875 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/09_hrnet_on_personality.py | import torch.optim as optim
import torch.nn as nn
from dpcv.config.hrnet_cls_cfg import cfg
from dpcv.modeling.networks.hr_net_cls import get_hr_net_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_frame_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate HRNet on the personality dataset."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = get_hr_net_model()
    loss_f = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = ImageModalTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)


if __name__ == "__main__":
    main(parse_args(), cfg)
| 1,330 | 32.275 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/01_deep_bimodal_regression_image.py | import torch.nn as nn
import torch.optim as optim
from dpcv.config.deep_bimodal_regression_cfg import cfg
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.modeling.networks.dan import get_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_frame_data import make_data_loader
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate the deep bimodal regression image model."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = get_model(pretrained=True)
    loss_f = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = ImageModalTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)


if __name__ == "__main__":
    main(parse_args(), cfg)
| 1,337 | 33.307692 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/05_persEmoN.py | import torch.optim as optim
from dpcv.config.per_emo_cfg import cfg
from dpcv.modeling.networks.sphereface_net import get_pers_emo_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.pers_emo_data import make_data_loader
from dpcv.engine.bi_modal_trainer import PersEmoTrainer
from dpcv.modeling.loss.pers_emo_loss import per_emo_loss
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate the PersEmoN model on the personality dataset."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = get_pers_emo_model()
    # PersEmoN uses a dedicated multi-task loss function (not a module).
    loss_f = per_emo_loss
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = PersEmoTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)


if __name__ == "__main__":
    main(parse_args(), cfg)
| 1,363 | 33.1 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/07_interpret_audio_net.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchaudio
from dpcv.config.interpret_aud_cfg import cfg
from dpcv.engine.bi_modal_trainer import AudioTrainer
from dpcv.modeling.networks.audio_interpretability_net import get_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.interpretability_audio_data import make_data_loader, norm
from dpcv.tools.exp import run
def main(args, cfg):
    """Train and evaluate the interpretable audio network."""
    setup_seed(12345)
    cfg = setup_config(args, cfg)
    logger, log_dir = make_logger(cfg.OUTPUT_DIR)
    # One loader per split, all built from the same config.
    data_loader = {split: make_data_loader(cfg, mode=split)
                   for split in ("train", "valid", "test")}
    model = get_model(cfg)
    loss_f = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
    collector = TrainSummary()
    trainer = AudioTrainer(cfg, collector, logger)
    run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
def audio_process(aud_file):
    """Load a wav file and turn it into a normalized half-spectrum tensor.

    The first channel is resampled to 4 kHz, transformed with an FFT, and
    only the magnitudes of the first (non-mirrored) half are kept before
    normalization.
    """
    waveform, sample_rate = torchaudio.load(aud_file)
    mono = waveform[0, :].view(1, -1)
    resampled = torchaudio.transforms.Resample(sample_rate, 4000)(mono)
    # Magnitude spectrum; drop the mirrored upper half of the FFT output.
    spectrum = torch.abs(torch.fft.fft(resampled))
    keep = int(resampled.shape[-1] / 2)
    normalized = norm(spectrum[..., :keep])
    if normalized.shape[-1] < 30604:
        print("unusual input audio with the length:{}".format(normalized.shape[-1]))
    return normalized
def load_model(cfg, weights):
    """Build the audio model from cfg and restore weights from a checkpoint."""
    checkpoint = torch.load(weights)
    model = get_model(cfg)
    model.load_state_dict(checkpoint["model_state_dict"])
    return model
def visualize_cam(model_weights, image, trait_id=None):
    """Plot a class-activation overlay for one audio file.

    Args:
        model_weights: path of a checkpoint loadable by :func:`load_model`
        image: path of the input wav file (despite the name, this is audio)
        trait_id: index of the personality trait to visualize; when None,
            the trait with the highest predicted score is used
    """
    from dpcv.tools.cam import CAM
    from dpcv.tools.cam_vis import to_pil_image, overlay_audio_mask
    import matplotlib.pylab as plt

    aud_tensor = audio_process(image)
    model = load_model(cfg, model_weights)
    # CAM hooks are registered disabled; enable only around this forward pass.
    cam_extractor = CAM(model, "gap", enable_hooks=False, conv1d=True)
    cam_extractor._hooks_enabled = True
    model.zero_grad()
    scores = model(aud_tensor.unsqueeze(0).cuda())
    # Default to the highest-scoring trait when none was requested.
    trait_id = scores.squeeze(0).argmax().item() if trait_id is None else trait_id
    activation_map = cam_extractor(trait_id, scores).cpu()
    # Tear hooks down again so repeated calls do not accumulate them.
    cam_extractor.clear_hooks()
    cam_extractor._hooks_enabled = False
    # heatmap = to_pil_image(activation_map, mode='F')
    result = overlay_audio_mask(aud_tensor, activation_map)
    plt.plot(result[0])
    plt.show()
if __name__ == "__main__":
    # args = parse_args()
    # main(args, cfg)
    # Render a CAM overlay for a handful of validation recordings.
    checkpoint = "../results/interpret_aud/11-06_00-35/checkpoint_21.pkl"
    sample_wavs = [
        "../datasets/raw_voice/validationData/0mym1CooiTE.005.wav",
        "../datasets/raw_voice/validationData/0uCqd5hZcyI.004.wav",
        "../datasets/raw_voice/validationData/1pm5uoU85FI.004.wav",
        "../datasets/raw_voice/validationData/2rV3Ibtdnvs.001.wav",
        "../datasets/raw_voice/validationData/5KHOpRCxnwQ.001.wav",
    ]
    for wav in sample_wavs:
        visualize_cam(checkpoint, wav)
| 3,415 | 34.583333 | 101 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.