repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
fastai | fastai-master/fastai/losses.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01a_losses.ipynb.
# %% ../nbs/01a_losses.ipynb 2
from __future__ import annotations
from .imports import *
from .torch_imports import *
from .torch_core import *
from .layers import *
# %% auto 0
__all__ = ['BaseLoss', 'CrossEntropyLossFlat', 'FocalLoss', 'FocalLossFlat', 'BCEWithLogitsLossFlat', 'BCELossFlat',
'MSELossFlat', 'L1LossFlat', 'LabelSmoothingCrossEntropy', 'LabelSmoothingCrossEntropyFlat', 'DiceLoss']
# %% ../nbs/01a_losses.ipynb 5
class BaseLoss():
    "Same as `loss_cls`, but flattens input and target."
    # Default no-op activation/decodes; subclasses override with the loss's
    # fused activation (e.g. softmax) and prediction decoding (e.g. argmax).
    activation=decodes=noops
    def __init__(self,
        loss_cls, # Uninitialized PyTorch-compatible loss
        *args,
        axis:int=-1, # Class axis
        flatten:bool=True, # Flatten `inp` and `targ` before calculating loss
        floatify:bool=False, # Convert `targ` to `float`
        is_2d:bool=True, # Whether `flatten` keeps one or two channels when applied
        **kwargs
    ):
        store_attr("axis,flatten,floatify,is_2d")
        self.func = loss_cls(*args,**kwargs)
        # Make this wrapper look like the wrapped loss (name, docstring, ...)
        functools.update_wrapper(self, self.func)
    def __repr__(self) -> str: return f"FlattenedLoss of {self.func}"
    @property
    def reduction(self) -> str: return self.func.reduction
    @reduction.setter
    def reduction(self, v:str):
        "Sets the reduction style (typically 'mean', 'sum', or 'none')"
        self.func.reduction = v
    def _contiguous(self, x:Tensor) -> TensorBase:
        "Move `self.axis` to the last dimension and ensure tensor is contigous for `Tensor` otherwise just return"
        return TensorBase(x.transpose(self.axis,-1).contiguous()) if isinstance(x,torch.Tensor) else x
    def __call__(self,
        inp:Tensor|MutableSequence, # Predictions from a `Learner`
        targ:Tensor|MutableSequence, # Actual y label
        **kwargs
    ) -> TensorBase: # `loss_cls` calculated on `inp` and `targ`
        inp,targ = map(self._contiguous, (inp,targ))
        # fp16 targets are kept as-is so mixed-precision training isn't upcast
        if self.floatify and targ.dtype!=torch.float16: targ = targ.float()
        # Most torch losses require int64 class indices
        if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long()
        # is_2d keeps the class channel: (N,...,C) -> (N*...,C); otherwise fully flatten
        if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1)
        return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs)
    def to(self, device:torch.device):
        "Move the loss function to a specified `device`"
        if isinstance(self.func, nn.Module): self.func.to(device)
# %% ../nbs/01a_losses.ipynb 8
@delegates()
class CrossEntropyLossFlat(BaseLoss):
    "Same as `nn.CrossEntropyLoss`, but flattens input and target."
    y_int = True  # targets are integer class indices (not one-hot / float)
    @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean')
    def __init__(self,
        *args,
        axis:int=-1, # Class axis
        **kwargs
    ):
        super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs)
    def decodes(self, x:Tensor) -> Tensor:
        "Converts model output to target format"
        return x.argmax(dim=self.axis)
    def activation(self, x:Tensor) -> Tensor:
        "`nn.CrossEntropyLoss`'s fused activation function applied to model output"
        return F.softmax(x, dim=self.axis)
# %% ../nbs/01a_losses.ipynb 13
class FocalLoss(Module):
    y_int=True  # targets are integer class indices (not one-hot / float)
    def __init__(self,
        gamma:float=2.0, # Focusing parameter. Higher values down-weight easy examples' contribution to loss
        weight:Tensor=None, # Manual rescaling weight given to each class
        reduction:str='mean' # PyTorch reduction to apply to the output
    ):
        "Applies Focal Loss: https://arxiv.org/pdf/1708.02002.pdf"
        store_attr()
    def forward(self, inp:Tensor, targ:Tensor) -> Tensor:
        "Applies focal loss based on https://arxiv.org/pdf/1708.02002.pdf"
        # Per-element CE is needed so each term can be re-weighted before reduction
        ce_loss = F.cross_entropy(inp, targ, weight=self.weight, reduction="none")
        # p_t = probability of the true class; (1-p_t)^gamma down-weights easy examples
        p_t = torch.exp(-ce_loss)
        loss = (1 - p_t)**self.gamma * ce_loss
        if self.reduction == "mean":
            loss = loss.mean()
        elif self.reduction == "sum":
            loss = loss.sum()
        return loss
class FocalLossFlat(BaseLoss):
    """
    Same as CrossEntropyLossFlat but with focal paramter, `gamma`. Focal loss is introduced by Lin et al.
    https://arxiv.org/pdf/1708.02002.pdf. Note the class weighting factor in the paper, alpha, can be
    implemented through pytorch `weight` argument passed through to F.cross_entropy.
    """
    y_int = True  # targets are integer class indices (not one-hot / float)
    @use_kwargs_dict(keep=True, weight=None, reduction='mean')
    def __init__(self,
        *args,
        gamma:float=2.0, # Focusing parameter. Higher values down-weight easy examples' contribution to loss
        axis:int=-1, # Class axis
        **kwargs
    ):
        super().__init__(FocalLoss, *args, gamma=gamma, axis=axis, **kwargs)
    def decodes(self, x:Tensor) -> Tensor:
        "Converts model output to target format"
        return x.argmax(dim=self.axis)
    def activation(self, x:Tensor) -> Tensor:
        "`F.cross_entropy`'s fused activation function applied to model output"
        return F.softmax(x, dim=self.axis)
# %% ../nbs/01a_losses.ipynb 16
@delegates()
class BCEWithLogitsLossFlat(BaseLoss):
    "Same as `nn.BCEWithLogitsLoss`, but flattens input and target."
    @use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None)
    def __init__(self,
        *args,
        axis:int=-1, # Class axis
        floatify:bool=True, # Convert `targ` to `float`
        thresh:float=0.5, # The threshold on which to predict
        **kwargs
    ):
        # `pos_weight` is per-class, so flattening (which destroys the class
        # dim) would make its shape mismatch; force flatten=False in that case.
        if kwargs.get('pos_weight', None) is not None and kwargs.get('flatten', None) is True:
            raise ValueError("`flatten` must be False when using `pos_weight` to avoid a RuntimeError due to shape mismatch")
        if kwargs.get('pos_weight', None) is not None: kwargs['flatten'] = False
        super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
        self.thresh = thresh
    def decodes(self, x:Tensor) -> Tensor:
        "Converts model output to target format"
        return x>self.thresh
    def activation(self, x:Tensor) -> Tensor:
        "`nn.BCEWithLogitsLoss`'s fused activation function applied to model output"
        return torch.sigmoid(x)
# %% ../nbs/01a_losses.ipynb 18
# Functional-style flattened losses: each simply wraps the matching torch loss
# in a `BaseLoss` with fully-flattened inputs (is_2d=False) and float targets.
@use_kwargs_dict(weight=None, reduction='mean')
def BCELossFlat(
    *args,
    axis:int=-1, # Class axis
    floatify:bool=True, # Convert `targ` to `float`
    **kwargs
):
    "Same as `nn.BCELoss`, but flattens input and target."
    return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# %% ../nbs/01a_losses.ipynb 20
@use_kwargs_dict(reduction='mean')
def MSELossFlat(
    *args,
    axis:int=-1, # Class axis
    floatify:bool=True, # Convert `targ` to `float`
    **kwargs
):
    "Same as `nn.MSELoss`, but flattens input and target."
    return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# %% ../nbs/01a_losses.ipynb 23
@use_kwargs_dict(reduction='mean')
def L1LossFlat(
    *args,
    axis:int=-1, # Class axis
    floatify:bool=True, # Convert `targ` to `float`
    **kwargs
):
    "Same as `nn.L1Loss`, but flattens input and target."
    # Wraps `nn.L1Loss` in a fully-flattened `BaseLoss` (is_2d=False), same
    # pattern as `BCELossFlat`/`MSELossFlat`. Annotations added for consistency
    # with those siblings; defaults and behavior are unchanged.
    return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# %% ../nbs/01a_losses.ipynb 24
class LabelSmoothingCrossEntropy(Module):
    y_int = True  # targets are integer class indices (not one-hot / float)
    def __init__(self,
        eps:float=0.1, # The weight for the interpolation formula
        weight:Tensor=None, # Manual rescaling weight given to each class passed to `F.nll_loss`
        reduction:str='mean' # PyTorch reduction to apply to the output
    ):
        store_attr()
    def forward(self, output:Tensor, target:Tensor) -> Tensor:
        "Apply `F.log_softmax` on output then blend the loss/num_classes(`c`) with the `F.nll_loss`"
        c = output.size()[1]
        log_preds = F.log_softmax(output, dim=1)
        if self.reduction=='sum': loss = -log_preds.sum()
        else:
            loss = -log_preds.sum(dim=1) #We divide by that size at the return line so sum and not mean
            if self.reduction=='mean':  loss = loss.mean()
        # Blend: eps spread uniformly over all classes + (1-eps) on the true class
        return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), weight=self.weight, reduction=self.reduction)
    def activation(self, out:Tensor) -> Tensor:
        "`F.log_softmax`'s fused activation function applied to model output"
        return F.softmax(out, dim=-1)
    def decodes(self, out:Tensor) -> Tensor:
        "Converts model output to target format"
        return out.argmax(dim=-1)
# %% ../nbs/01a_losses.ipynb 27
@delegates()
class LabelSmoothingCrossEntropyFlat(BaseLoss):
    "Same as `LabelSmoothingCrossEntropy`, but flattens input and target."
    y_int = True  # targets are integer class indices (not one-hot / float)
    @use_kwargs_dict(keep=True, eps=0.1, reduction='mean')
    def __init__(self,
        *args,
        axis:int=-1, # Class axis
        **kwargs
    ):
        super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs)
    def activation(self, out:Tensor) -> Tensor:
        "`LabelSmoothingCrossEntropy`'s fused activation function applied to model output"
        return F.softmax(out, dim=-1)
    def decodes(self, out:Tensor) -> Tensor:
        "Converts model output to target format"
        return out.argmax(dim=-1)
# %% ../nbs/01a_losses.ipynb 30
class DiceLoss:
    "Dice loss for segmentation"
    def __init__(self,
        axis:int=1, # Class axis
        smooth:float=1e-6, # Helps with numerical stabilities in the IoU division
        reduction:str="sum", # PyTorch reduction to apply to the output
        square_in_union:bool=False # Squares predictions to increase slope of gradients
    ):
        store_attr()
    def __call__(self, pred:Tensor, targ:Tensor) -> Tensor:
        "One-hot encodes targ, then runs IoU calculation then takes 1-dice value"
        # `targ` arrives as class indices; expand to one binary mask per class
        # so it matches `pred`'s (batch, classes, ...) layout
        targ = self._one_hot(targ, pred.shape[self.axis])
        pred, targ = TensorBase(pred), TensorBase(targ)
        assert pred.shape == targ.shape, 'input and target dimensions differ, DiceLoss expects non one-hot targs'
        pred = self.activation(pred)
        # Reduce over the spatial dims only, keeping (batch, classes)
        sum_dims = list(range(2, len(pred.shape)))
        inter = torch.sum(pred*targ, dim=sum_dims)
        union = (torch.sum(pred**2+targ, dim=sum_dims) if self.square_in_union
            else torch.sum(pred+targ, dim=sum_dims))
        dice_score = (2. * inter + self.smooth)/(union + self.smooth)
        loss = 1- dice_score
        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        return loss
    @staticmethod
    def _one_hot(
        x:Tensor, # Non one-hot encoded targs
        classes:int, # The number of classes
        axis:int=1 # The axis to stack for encoding (class dimension)
    ) -> Tensor:
        "Creates one binary mask per class"
        return torch.stack([torch.where(x==c, 1, 0) for c in range(classes)], axis=axis)
    def activation(self, x:Tensor) -> Tensor:
        "Activation function applied to model output"
        return F.softmax(x, dim=self.axis)
    def decodes(self, x:Tensor) -> Tensor:
        "Converts model output to target format"
        return x.argmax(dim=self.axis)
| 11,450 | 40.043011 | 130 | py |
fastai | fastai-master/fastai/torch_core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_torch_core.ipynb.
# %% ../nbs/00_torch_core.ipynb 2
from __future__ import annotations
from .imports import *
from .torch_imports import *
from packaging.version import parse
# %% auto 0
__all__ = ['norm_types', 'setup_cuda', 'subplots', 'show_image', 'show_titled_image', 'show_images', 'ArrayBase',
'ArrayImageBase', 'ArrayImage', 'ArrayImageBW', 'ArrayMask', 'tensor', 'set_seed', 'get_random_states',
'set_random_states', 'no_random', 'unsqueeze', 'unsqueeze_', 'apply', 'maybe_gather', 'to_detach', 'to_half',
'to_float', 'default_device', 'to_device', 'to_cpu', 'to_np', 'to_concat', 'TensorBase', 'TensorImageBase',
'TensorImage', 'TensorImageBW', 'TensorMask', 'TensorFlowField', 'TensorCategory', 'TensorMultiCategory',
'TitledTensorScalar', 'concat', 'Chunks', 'show_title', 'ShowTitle', 'TitledInt', 'TitledFloat', 'TitledStr',
'TitledTuple', 'get_empty_df', 'display_df', 'get_first', 'one_param', 'item_find', 'find_device', 'find_bs',
'np_func', 'Module', 'get_model', 'one_hot', 'one_hot_decode', 'params', 'trainable_params',
'norm_bias_params', 'batch_to_samples', 'logit', 'num_distrib', 'rank_distrib', 'distrib_barrier',
'base_doc', 'doc', 'nested_reorder', 'flatten_check', 'make_cross_image', 'show_image_batch',
'requires_grad', 'init_default', 'cond_init', 'apply_leaf', 'apply_init', 'script_use_ctx',
'script_save_ctx', 'script_fwd', 'script_bwd', 'grad_module', 'ismin_torch', 'notmax_torch', 'progress_bar',
'master_bar']
# %% ../nbs/00_torch_core.ipynb 5
_all_ = ['progress_bar','master_bar']
# %% ../nbs/00_torch_core.ipynb 6
defaults.benchmark = True
# %% ../nbs/00_torch_core.ipynb 7
def setup_cuda(benchmark=defaults.benchmark):
    "Sets the main cuda device and sets `cudnn.benchmark` to `benchmark`"
    if torch.cuda.is_available():
        if torch.cuda.current_device()==0:
            # DEFAULT_GPU env var selects which device to make current
            def_gpu = int(os.environ.get('DEFAULT_GPU') or 0)
            # Valid device ordinals are 0..device_count()-1, so a strict `>` is
            # required here (the previous `>=` could pass an out-of-range id to
            # `set_device`, e.g. DEFAULT_GPU=1 on a single-GPU machine).
            if torch.cuda.device_count() > def_gpu: torch.cuda.set_device(def_gpu)
        torch.backends.cudnn.benchmark = benchmark
# %% ../nbs/00_torch_core.ipynb 10
@delegates(plt.subplots, keep=True)
def subplots(
    nrows:int=1, # Number of rows in returned axes grid
    ncols:int=1, # Number of columns in returned axes grid
    figsize:tuple=None, # Width, height in inches of the returned figure
    imsize:int=3, # Size (in inches) of images that will be displayed in the returned figure
    suptitle:str=None, # Title to be set to returned figure
    **kwargs
) -> (plt.Figure, plt.Axes): # Returns both fig and ax as a tuple
    "Returns a figure and set of subplots to display images of `imsize` inches"
    if figsize is None:
        # Extra 0.6in of height leaves room for the suptitle when images are small
        h=nrows*imsize if suptitle is None or imsize>2 else nrows*imsize+0.6 #https://github.com/matplotlib/matplotlib/issues/5355
        figsize=(ncols*imsize, h)
    fig,ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)
    if suptitle is not None: fig.suptitle(suptitle)
    # Normalize the single-axes case to an array so callers can always iterate/flatten
    if nrows*ncols==1: ax = array([ax])
    return fig,ax
# %% ../nbs/00_torch_core.ipynb 13
def _fig_bounds(x):
r = x//32
return min(5, max(1,r))
# %% ../nbs/00_torch_core.ipynb 14
@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
    "Show a PIL or PyTorch image on `ax`."
    # Handle pytorch axis order
    if hasattrs(im, ('data','cpu','permute')):
        im = im.data.cpu()
        # CHW -> HWC when there are few channels (heuristic: <5 means channel-first)
        if im.shape[0]<5: im=im.permute(1,2,0)
    elif not isinstance(im,np.ndarray): im=array(im)
    # Handle 1-channel images
    if im.shape[-1]==1: im=im[...,0]
    # `ctx` is an alias for `ax` used by fastai's show machinery
    ax = ifnone(ax,ctx)
    if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
    if ax is None: _,ax = plt.subplots(figsize=figsize)
    ax.imshow(im, **kwargs)
    if title is not None: ax.set_title(title)
    ax.axis('off')
    return ax
# %% ../nbs/00_torch_core.ipynb 21
@delegates(show_image, keep=True)
def show_titled_image(o, **kwargs):
    "Call `show_image` destructuring `o` to `(img,title)`"
    show_image(o[0], title=str(o[1]), **kwargs)
# %% ../nbs/00_torch_core.ipynb 24
@delegates(subplots)
def show_images(ims, nrows=1, ncols=None, titles=None, **kwargs):
    "Show all images `ims` as subplots with `rows` using `titles`."
    # Derive ncols from the number of images when not given
    if ncols is None: ncols = int(math.ceil(len(ims)/nrows))
    if titles is None: titles = [None]*len(ims)
    axs = subplots(nrows, ncols, **kwargs)[1].flat
    for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t)
# %% ../nbs/00_torch_core.ipynb 27
class ArrayBase(ndarray):
    "An `ndarray` that can modify casting behavior"
    @classmethod
    def _before_cast(cls, x): return x if isinstance(x,ndarray) else array(x)
# %% ../nbs/00_torch_core.ipynb 28
class ArrayImageBase(ArrayBase):
    "Base class for arrays representing images"
    # Default imshow kwargs; subclasses override to change colormap/overlay style
    _show_args = {'cmap':'viridis'}
    def show(self, ctx=None, **kwargs):
        return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# %% ../nbs/00_torch_core.ipynb 29
class ArrayImage(ArrayImageBase):
    "An array representing an image"
    pass
# %% ../nbs/00_torch_core.ipynb 30
class ArrayImageBW(ArrayImage):
    "An array representing an image"
    _show_args = {'cmap':'Greys'}
# %% ../nbs/00_torch_core.ipynb 31
class ArrayMask(ArrayImageBase):
    "An array representing an image mask"
    # Semi-transparent categorical overlay, no interpolation between mask labels
    _show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
# %% ../nbs/00_torch_core.ipynb 37
# `@patch` adds this method to `Tensor`: whole-tensor equality for non-scalar
# tensors (single bool), elementwise `==` for 0-dim tensors.
@patch
def __array_eq__(self:Tensor,b):
    return torch.equal(self,b) if self.dim() else self==b
# %% ../nbs/00_torch_core.ipynb 38
def _array2tensor(x, requires_grad=False, pin_memory=False, **kwargs):
if x.dtype==np.uint16: x = x.astype(np.float32)
# windows default numpy int dtype is int32, while torch tensor default int dtype is int64
# https://github.com/numpy/numpy/issues/9464
if sys.platform == "win32" and x.dtype==int: x = x.astype(np.int64)
t = torch.as_tensor(x, **kwargs)
t.requires_grad_(requires_grad)
if pin_memory: t.pin_memory()
return t
# %% ../nbs/00_torch_core.ipynb 39
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
    "Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
    # `tensor(1,2,3)` is treated as `tensor((1,2,3))`
    if len(rest): x = (x,)+rest
    # There was a Pytorch bug in dataloader using num_workers>0. Haven't confirmed if fixed
    # if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
    # Dispatch on input type: Tensor passes through; python containers/numbers,
    # ndarrays and pandas objects each take the appropriate conversion path.
    res = (x if isinstance(x, Tensor)
           else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list,numbers.Number))
           else _array2tensor(x, **kwargs) if isinstance(x, ndarray)
           else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
           # else as_tensor(array(x, **kwargs)) if hasattr(x, '__array__') or is_iter(x)
           else _array2tensor(array(x), **kwargs))
    # float64 is downcast to float32, torch's conventional default float dtype
    if res.dtype is torch.float64: return res.float()
    return res
# %% ../nbs/00_torch_core.ipynb 42
def set_seed(s, reproducible=False):
    "Set random seed for `random`, `torch`, and `numpy` (where available)"
    # Each library seeding is guarded individually so the function still works
    # if one of the names was stripped from the namespace (hence NameError).
    seeders = (lambda: torch.manual_seed(s),
               lambda: torch.cuda.manual_seed_all(s),
               lambda: np.random.seed(s % (2**32 - 1)))
    for seed_fn in seeders:
        try: seed_fn()
        except NameError: pass
    random.seed(s)
    if reproducible:
        # Trade speed for determinism in cudnn kernels
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
# %% ../nbs/00_torch_core.ipynb 47
def get_random_states():
    "Gets states for `random`, `torch`, and `numpy` random number generators"
    # Keys deliberately mirror the parameter names of `set_random_states`
    # so the result can be restored with `set_random_states(**states)`.
    states = {}
    states['random_state'] = random.getstate()
    states['numpy_state'] = np.random.get_state()
    states['torch_state'] = torch.get_rng_state()
    states['torch_cuda_state'] = torch.cuda.get_rng_state_all()
    states['torch_deterministic'] = torch.backends.cudnn.deterministic
    states['torch_benchmark'] = torch.backends.cudnn.benchmark
    return states
# %% ../nbs/00_torch_core.ipynb 48
def set_random_states(random_state,numpy_state,torch_state,torch_cuda_state,torch_deterministic,torch_benchmark):
    "Set states for `random`, `torch`, and `numpy` random number generators"
    # Restores everything `get_random_states` captured, in the same granularity.
    random.setstate(random_state)
    np.random.set_state(numpy_state)
    torch.set_rng_state(torch_state)
    torch.cuda.set_rng_state_all(torch_cuda_state)
    cudnn = torch.backends.cudnn
    cudnn.deterministic = torch_deterministic
    cudnn.benchmark = torch_benchmark
# %% ../nbs/00_torch_core.ipynb 53
@contextmanager
def no_random(seed=42,reproducible=True):
    "Stores and retrieves state of random number generators. Sets random seed for `random`, `torch`, and `numpy`."
    # Snapshot all RNG state, seed deterministically for the `with` body,
    # then restore the snapshot even if the body raises.
    states = get_random_states()
    set_seed(seed,reproducible=reproducible)
    try:
        yield #we are managing global variables
    finally:
        set_random_states(**states)
# %% ../nbs/00_torch_core.ipynb 59
def unsqueeze(x, dim=-1, n=1):
    "Same as `torch.unsqueeze` but can add `n` dims"
    # Out-of-place: `x` is left untouched, the expanded view is returned.
    res = x
    remaining = n
    while remaining > 0:
        res = res.unsqueeze(dim)
        remaining -= 1
    return res
# %% ../nbs/00_torch_core.ipynb 61
def unsqueeze_(x, dim=-1, n=1):
    "Same as `torch.unsqueeze_` but can add `n` dims"
    # In-place: mutates `x` and returns it so calls can be chained.
    added = 0
    while added < n:
        x.unsqueeze_(dim)
        added += 1
    return x
# %% ../nbs/00_torch_core.ipynb 63
# Pickle helpers: rebuild the raw (quantized) tensor then recast to subclass `cls`
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
# %% ../nbs/00_torch_core.ipynb 64
def apply(func, x, *args, **kwargs):
    "Apply `func` recursively to `x`, passing on args"
    # Recurse into lists/tuples/L and dicts, preserving the container type
    if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
    if isinstance(x,dict):  return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
    res = func(x, *args, **kwargs)
    # Keep the fastai subclass/metadata of the input on the result
    return res if x is None else retain_type(res, x)
# %% ../nbs/00_torch_core.ipynb 65
def maybe_gather(x, axis=0):
    "Gather copies of `x` on `axis` (if training is distributed)"
    # No-op in single-process training
    if num_distrib()<=1: return x
    ndim = x.ndim
    # Scalars (ndim==0) are lifted to shape (1,) so all_gather can stack them
    res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
    torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
    return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
# %% ../nbs/00_torch_core.ipynb 66
def to_detach(b, cpu=True, gather=True):
    "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
    def _inner(x, cpu=True, gather=True):
        if not isinstance(x,Tensor): return x
        x = x.detach()
        # In distributed training, optionally collect each rank's copy first
        if gather: x = maybe_gather(x)
        return x.cpu() if cpu else x
    return apply(_inner, b, cpu=cpu, gather=gather)
# %% ../nbs/00_torch_core.ipynb 68
def to_half(b):
    "Recursively map floating point tensors in `b ` to FP16."
    # Non-float tensors (e.g. int labels) are passed through unchanged
    return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)
# %% ../nbs/00_torch_core.ipynb 69
def to_float(b):
    "Recursively map floating point tensors in `b ` to float."
    return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)
# %% ../nbs/00_torch_core.ipynb 70
# None: True if available; True: error if not available; False: use CPU
defaults.use_cuda = None
# %% ../nbs/00_torch_core.ipynb 71
def _has_mps():
    "True if an Apple Metal (MPS) backend is available"
    # `nested_attr(...)` tolerates old torch builds without `backends.mps`
    if nested_attr(torch, 'backends.mps.is_available', noop)(): return True
    return getattr(torch, 'has_mps', False)
def default_device(use=-1):
    "Return or set default device; `use_cuda`: -1 - CUDA/mps if available; True - error if not available; False - CPU"
    # `use=-1` means "read the current default"; any other value also stores it
    if use == -1: use = defaults.use_cuda
    else: defaults.use_cuda=use
    if use is None:
        if torch.cuda.is_available() or _has_mps(): use = True
    if use:
        # Prefer CUDA over MPS when both are present
        if torch.cuda.is_available(): return torch.device(torch.cuda.current_device())
        if _has_mps(): return torch.device('mps')
    return torch.device('cpu')
# %% ../nbs/00_torch_core.ipynb 73
def to_device(b, device=None, non_blocking=False):
    "Recursively put `b` on `device`."
    # Global CPU override takes precedence over an explicit `device`
    if defaults.use_cuda==False: device='cpu'
    elif device is None: device=default_device()
    def _inner(o):
        if isinstance(o,Tensor): return o.to(device, non_blocking=non_blocking)
        # if hasattr(o, "to_device"): return o.to_device(device)
        return o
    return apply(_inner, b)
# %% ../nbs/00_torch_core.ipynb 76
def to_cpu(b):
    "Recursively map tensors in `b ` to the cpu."
    return to_device(b,'cpu')
# %% ../nbs/00_torch_core.ipynb 78
def to_np(x):
    "Convert a tensor to a numpy array."
    return apply(lambda o: o.data.cpu().numpy(), x)
# %% ../nbs/00_torch_core.ipynb 80
def to_concat(xs, dim=0):
    "Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
    if not xs: return xs
    # Recurse into per-position lists/dicts so nested structures line up
    if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
    if isinstance(xs[0],dict):  return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
    #We may receive xs that are not concatenable (inputs of a text classifier for instance),
    #  in this case we return a big list
    try:    return retain_type(torch.cat(xs, dim=dim), xs[0])
    # `except Exception` (not a bare except) so Ctrl-C/SystemExit still propagate;
    # any concat failure (e.g. shape-mismatch RuntimeError) falls back to a flat `L`
    except Exception:
        return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
                      for i in range_of(o_)) for o_ in xs], L())
# %% ../nbs/00_torch_core.ipynb 84
# Parsed PyTorch versions for faster version checking
_torch_version = parse(torch.__version__)
_torch_20  = parse('2.0')
_torch_113 = parse('1.13')
_torch_112 = parse('1.12')
# %% ../nbs/00_torch_core.ipynb 85
# `@patch` adds this method to `Tensor`
@patch
def set_meta(self:Tensor, x, as_copy=False):
    "Set all metadata in `__dict__`"
    if not hasattr(x,'__dict__'): return
    # XXX: change to `deepcopy` once PyTorch 1.7.1 is out, and check nb 23 segmentation fit works
    self.__dict__ = copy(x.__dict__) if as_copy else x.__dict__
# %% ../nbs/00_torch_core.ipynb 86
# Compatibility shim for old torch builds lacking the module-level `as_subclass`
if not hasattr(torch,'as_subclass'): torch.as_subclass = torch.Tensor.as_subclass
# %% ../nbs/00_torch_core.ipynb 87
# `@patch` replaces `Tensor.as_subclass` with a metadata-preserving version
@patch
def as_subclass(self:Tensor, typ):
    "Cast to `typ` and include `__dict__` and meta"
    return retain_meta(self, torch.as_subclass(self, typ))
# %% ../nbs/00_torch_core.ipynb 90
def _torch_handled(args, opt, func):
if func not in opt: return False
for oks in opt[func]:
if all(isinstance(arg,ok) for arg,ok in zip(args,oks) if ok): return True
# %% ../nbs/00_torch_core.ipynb 91
# from https://github.com/pytorch/pytorch/blob/13c975684a220ec096216ec6468ccd0dc90ff50a/torch/_tensor.py#L34
def _rebuild_from_type(func, type, args, dict):
ret = func(*args).as_subclass(type)
ret.__dict__ = dict
return ret
# %% ../nbs/00_torch_core.ipynb 92
def _find_args(x):
    "Return the args in `x` that carry a `__dict__` (i.e. potential fastai metadata holders)"
    # If the first arg is itself a non-empty list/tuple (e.g. the input of
    # `torch.stack`), search inside it rather than the top-level args.
    x0 = x[0] if is_listy(x[0]) and x[0] else x
    return [a for a in x0 if hasattr(a,'__dict__')]
# %% ../nbs/00_torch_core.ipynb 93
class TensorBase(Tensor):
    "A `Tensor` which support subclass pickling, and maintains metadata when casting or after methods"
    # `debug` prints every dispatched torch function; `_opt` maps func -> list of
    # registered arg-type signatures (see `register_func`)
    debug,_opt = False,defaultdict(list)
    def __new__(cls, x, **kwargs):
        res = cast(tensor(x), cls)
        # Extra kwargs become metadata attributes on the new tensor
        for k,v in kwargs.items(): setattr(res, k, v)
        return res
    @classmethod
    def _before_cast(cls, x): return tensor(x)
    def __repr__(self): return re.sub('tensor', self.__class__.__name__, super().__repr__())
    def __reduce_ex__(self, proto):
        # PyTorch >= 2.0 pickles tensor subclasses correctly on its own; older
        # versions need the manual storage-based rebuild below.
        if _torch_version >= _torch_20:
            return super().__reduce_ex__(proto)
        else:
            torch.utils.hooks.warn_if_has_hooks(self)
            args = (self.storage(), self.storage_offset(), tuple(self.size()), self.stride())
            if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())
            args = args + (self.requires_grad, OrderedDict())
            f = torch._utils._rebuild_qtensor if self.is_quantized else torch._utils._rebuild_tensor_v2
            return (_rebuild_from_type, (f, type(self), args, self.__dict__))
    @classmethod
    def register_func(cls, func, *oks): cls._opt[func].append(oks)
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if cls.debug and func.__name__ not in ('__str__','__repr__'): print(func, types, args, kwargs)
        # Registered (func, arg-types) combinations fall back to plain Tensor dispatch
        if _torch_handled(args, cls._opt, func): types = (torch.Tensor,)
        res = super().__torch_function__(func, types, args, ifnone(kwargs, {}))
        # Propagate fastai metadata (`__dict__`) from the inputs onto the result(s)
        dict_objs = _find_args(args) if args else _find_args(list(kwargs.values()))
        if issubclass(type(res),TensorBase) and dict_objs: res.set_meta(dict_objs[0],as_copy=True)
        elif dict_objs and is_listy(res): [r.set_meta(dict_objs[0],as_copy=True) for r in res if issubclass(type(r),TensorBase)]
        return res
    # The `new_*`/`clone` overrides below route through plain `Tensor` then recast,
    # so the fastai subclass is preserved without re-triggering metadata dispatch.
    def new_tensor(self, size, dtype=None, device=None, requires_grad=False):
        cls = type(self)
        return self.as_subclass(Tensor).new_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
    def new_ones(self, data, dtype=None, device=None, requires_grad=False):
        cls = type(self)
        return self.as_subclass(Tensor).new_ones(data, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
    def new(self, x=None):
        cls = type(self)
        res = self.as_subclass(Tensor).new() if x is None else self.as_subclass(Tensor).new(x)
        return res.as_subclass(cls)
    def requires_grad_(self, requires_grad=True):
        # Workaround https://github.com/pytorch/pytorch/issues/50219
        self.requires_grad = requires_grad
        return self
    def clone(self, *, memory_format=None):
        cls = type(self)
        return self.as_subclass(Tensor).clone(memory_format=memory_format).as_subclass(cls)
    def new_empty(self, size, *, dtype=None, layout=None, device=None, pin_memory=False, requires_grad=False):
        cls = type(self)
        if _torch_version < _torch_113 and layout is None:
            layout = torch.strided
        if _torch_version < _torch_112:
            return super().new_empty(size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory, requires_grad=requires_grad)
        return self.as_subclass(Tensor).new_empty(size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory, requires_grad=requires_grad).as_subclass(cls)
    # NOTE(review): this second `new_empty` shadows the one above at class-creation
    # time, so only the variadic `*size` form is actually in effect — confirm
    # whether keeping both definitions is intentional.
    def new_empty(self, *size, dtype=None, layout=None, device=None, pin_memory=False, requires_grad=False):
        cls = type(self)
        if _torch_version < _torch_113 and layout is None:
            layout = torch.strided
        if _torch_version < _torch_112:
            return super().new_empty(*size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory, requires_grad=requires_grad)
        return self.as_subclass(Tensor).new_empty(*size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory, requires_grad=requires_grad).as_subclass(cls)
# %% ../nbs/00_torch_core.ipynb 106
class TensorImageBase(TensorBase):
    # Reuse the array-based defaults so ndarray and tensor images show identically
    _show_args = ArrayImageBase._show_args
    def show(self, ctx=None, **kwargs):
        return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# %% ../nbs/00_torch_core.ipynb 107
class TensorImage(TensorImageBase): pass
# %% ../nbs/00_torch_core.ipynb 108
class TensorImageBW(TensorImage): _show_args = ArrayImageBW._show_args
# %% ../nbs/00_torch_core.ipynb 109
class TensorMask(TensorImageBase):
    _show_args = ArrayMask._show_args
    def show(self, ctx=None, **kwargs):
        # When class codes are attached, pin the colormap range to the code count
        codes = getattr(self, 'codes', None)
        if codes is not None: kwargs = merge({'vmin': 0, 'vmax': len(codes)}, kwargs)
        return super().show(ctx=ctx, **kwargs)
# %% ../nbs/00_torch_core.ipynb 110
# Register mask/image combinations so mixed-type ops dispatch through plain
# `torch.Tensor` instead of raising subclass-priority errors.
for o in Tensor.__getitem__, Tensor.__ne__,Tensor.__eq__,Tensor.add,Tensor.sub,Tensor.mul,Tensor.div,Tensor.__rsub__,Tensor.__radd__,Tensor.matmul,Tensor.bmm:
    TensorBase.register_func(o, TensorMask, TensorImageBase)
    TensorBase.register_func(o, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorMask, TensorImageBase)
# %% ../nbs/00_torch_core.ipynb 117
class TensorFlowField(TensorBase): pass
# Sampling an image with a flow field mixes the two subclasses; dispatch as Tensor
TensorImage.register_func(F.grid_sample, TensorImageBase, TensorFlowField)
# %% ../nbs/00_torch_core.ipynb 119
class TensorCategory(TensorBase): pass
# Indexing images with a category tensor (e.g. boolean/label selection) is allowed
TensorBase.register_func(Tensor.__getitem__, TensorImageBase, TensorCategory)
# %% ../nbs/00_torch_core.ipynb 121
class TensorMultiCategory(TensorCategory): pass
# %% ../nbs/00_torch_core.ipynb 122
class TitledTensorScalar(TensorBase):
    "A tensor containing a scalar that has a `show` method"
    def show(self, **kwargs): show_title(self.item(), **kwargs)
# %% ../nbs/00_torch_core.ipynb 124
# `@patch` adds these convenience methods to fastcore's `L` list type
@patch
def tensored(self:L):
    "`mapped(tensor)`"
    return self.map(tensor)
@patch
def stack(self:L, dim=0):
    "Same as `torch.stack`"
    return torch.stack(list(self.tensored()), dim=dim)
@patch
def cat  (self:L, dim=0):
    "Same as `torch.cat`"
    return torch.cat  (list(self.tensored()), dim=dim)
# %% ../nbs/00_torch_core.ipynb 133
def concat(*ls):
    "Concatenate tensors, arrays, lists, or tuples"
    if not len(ls): return []
    it = ls[0]
    # Dispatch on the type of the first element; the result keeps that type
    if isinstance(it,torch.Tensor): res = torch.cat(ls)
    elif isinstance(it,ndarray): res = np.concatenate(ls)
    else:
        res = itertools.chain.from_iterable(map(L,ls))
        if isinstance(it,(tuple,list)): res = type(it)(res)
        else: res = L(res)
    return retain_type(res, it)
# %% ../nbs/00_torch_core.ipynb 135
class Chunks:
    "Slice and int indexing into a list of lists"
    def __init__(self, chunks, lens=None):
        self.chunks = chunks
        # Per-chunk lengths and their cumulative sums drive index->chunk lookup
        self.lens = L(map(len,self.chunks) if lens is None else lens)
        self.cumlens = np.cumsum(0+self.lens)
        self.totlen = self.cumlens[-1]
    def __getitem__(self,i):
        if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
        di,idx = self.doc_idx(i)
        return retain_type(self.chunks[di][idx], old=self.chunks[0])
    def getslice(self, i):
        "Gather a slice that may span several chunks, then concatenate"
        st_d,st_i = self.doc_idx(ifnone(i.start,0))
        en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
        res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
        for b in range(st_d+1,en_d): res.append(self.chunks[b])
        if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
        return concat(*res)
    def doc_idx(self, i):
        "Map a flat index `i` to a (chunk index, index within chunk) pair"
        if i<0: i=self.totlen+i # count from end
        docidx = np.searchsorted(self.cumlens, i+1)-1
        cl = self.cumlens[docidx]
        return docidx,i-cl
# %% ../nbs/00_torch_core.ipynb 140
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
    "Set title of `ax` to `o`, or print `o` if `ax` is `None`"
    ax = ifnone(ax,ctx)
    if ax is None: print(o)
    elif hasattr(ax, 'set_title'):
        # Append to any existing title rather than replacing it
        t = ax.title.get_text()
        if len(t) > 0: o = t+'\n'+str(o)
        ax.set_title(o, color=color)
    elif isinstance(ax, pd.Series):
        # `ax` can also be a pandas row: add `o` under a unique `label` key
        while label in ax: label += '_'
        ax = pd.concat([ax,pd.Series({label: o})])
    return ax
# %% ../nbs/00_torch_core.ipynb 142
class ShowTitle:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledInt(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledFloat(Float, ShowTitle):
    # NOTE(review): duplicates ShowTitle.show verbatim (see ShowTitle).
    _show_args = {'label': 'text'}
    def show(self, ctx=None, **kwargs):
        "Show self"
        return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledStr(Str, ShowTitle):
    # NOTE(review): duplicates ShowTitle.show verbatim (see ShowTitle).
    _show_args = {'label': 'text'}
    def show(self, ctx=None, **kwargs):
        "Show self"
        return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledTuple(fastuple, ShowTitle):
    # NOTE(review): duplicates ShowTitle.show verbatim (see ShowTitle).
    _show_args = {'label': 'text'}
    def show(self, ctx=None, **kwargs):
        "Show self"
        return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "An `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
# %% ../nbs/00_torch_core.ipynb 149
@patch
def truncate(self:TitledStr, n):
    "Truncate self to `n`"
    # Keep only the first `n` space-separated words.
    kept = self.split(' ')[:n]
    return TitledStr(' '.join(kept))
# %% ../nbs/00_torch_core.ipynb 151
# Keep a handle on pandas' original constructor so the patch below can delegate to it
# (the hasattr guard makes re-importing this module idempotent).
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
# %% ../nbs/00_torch_core.ipynb 152
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=None):
    # Transparently accept a torch Tensor as `data` by converting it to numpy first.
    if data is not None and isinstance(data, Tensor): data = to_np(data)
    self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
# %% ../nbs/00_torch_core.ipynb 153
def get_empty_df(n):
    "Return `n` empty rows of a dataframe"
    frame = pd.DataFrame(index=range(n))
    return [frame.iloc[row] for row in range(n)]
# %% ../nbs/00_torch_core.ipynb 154
def display_df(df):
    "Display `df` in a notebook or defaults to print"
    try:
        from IPython.display import display, HTML
    except:
        # No IPython available: plain-text fallback.
        return print(df)
    display(HTML(df.to_html()))
# %% ../nbs/00_torch_core.ipynb 155
def get_first(c):
    "Get the first element of c, even if c is a dataframe"
    # DataFrames/Series index rows positionally via `.iloc`; everything else via `[0]`.
    indexer = getattr(c, 'iloc', None)
    return c[0] if indexer is None else indexer[0]
# %% ../nbs/00_torch_core.ipynb 156
def one_param(m):
    "First parameter in `m`"
    # fastcore's `first` — presumably returns None when `m` has no parameters; confirm.
    return first(m.parameters())
# %% ../nbs/00_torch_core.ipynb 157
def item_find(x, idx=0):
    "Recursively takes the `idx`-th element of `x`"
    # Only the top level honours `idx`: recursive calls use the default idx=0.
    if is_listy(x): return item_find(x[idx])
    if isinstance(x,dict):
        # `idx` may be an int position into the keys, or a key itself.
        key = list(x.keys())[idx] if isinstance(idx, int) else idx
        return item_find(x[key])
    return x
# %% ../nbs/00_torch_core.ipynb 158
def find_device(b):
    "Recursively search the device of `b`."
    leaf = item_find(b)
    return leaf.device
# %% ../nbs/00_torch_core.ipynb 160
def find_bs(b):
    "Recursively search the batch size of `b`."
    leaf = item_find(b)
    # Non-tensor leaves (e.g. plain lists) fall back to the container length.
    if not hasattr(leaf, "shape"): return len(b)
    return leaf.shape[0]
# %% ../nbs/00_torch_core.ipynb 162
def np_func(f):
    "Convert a function taking and returning numpy arrays to one taking and returning tensors"
    def _inner(*args, **kwargs):
        # Only tensor positional args are converted; kwargs pass through untouched.
        converted = [to_np(a) if isinstance(a, Tensor) else a for a in args]
        return tensor(f(*converted, **kwargs))
    functools.update_wrapper(_inner, f)
    return _inner
# %% ../nbs/00_torch_core.ipynb 166
class Module(nn.Module, metaclass=PrePostInitMeta):
    "Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
    # PrePostInitMeta runs __pre_init__ before the subclass __init__, so nn.Module
    # is always initialized first without an explicit super().__init__() call.
    def __pre_init__(self, *args, **kwargs): super().__init__()
    def __init__(self): pass
# %% ../nbs/00_torch_core.ipynb 169
from torch.nn.parallel import DistributedDataParallel
# %% ../nbs/00_torch_core.ipynb 170
def get_model(model):
    "Return the model maybe wrapped inside `model`."
    # (Distributed)DataParallel wrappers keep the real model in `.module`.
    wrapped = isinstance(model, (DistributedDataParallel, nn.DataParallel))
    return model.module if wrapped else model
# %% ../nbs/00_torch_core.ipynb 171
def one_hot(x, c):
    "One-hot encode `x` with `c` classes."
    # Start from an all-zero uint8 vector and switch on the requested indices.
    encoded = torch.zeros(c, dtype=torch.uint8)
    if not isinstance(x, Tensor) or x.numel() == 0:
        # Non-tensor (or empty tensor) input goes through fastcore's L.
        encoded[list(L(x, use_list=None))] = 1.
    else:
        encoded[x] = 1.
    return encoded
# %% ../nbs/00_torch_core.ipynb 173
def one_hot_decode(x, vocab=None):
    "Return the indices (or corresponding `vocab` items) of the set slots in one-hot `x`"
    return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)
# %% ../nbs/00_torch_core.ipynb 175
def params(m):
    "Return all parameters of `m`"
    # `list()` instead of a pass-through comprehension: same result, clearer intent.
    return list(m.parameters())
# %% ../nbs/00_torch_core.ipynb 176
def trainable_params(m):
    "Return all trainable parameters of `m`"
    # Keep only parameters that participate in the backward pass.
    return [param for param in m.parameters() if param.requires_grad]
# %% ../nbs/00_torch_core.ipynb 178
# Normalization layer types special-cased by `norm_bias_params`/`cond_init` below.
norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm)
# %% ../nbs/00_torch_core.ipynb 179
def norm_bias_params(m, with_bias=True):
    "Return all bias and BatchNorm parameters"
    # A normalization layer contributes all of its parameters.
    if isinstance(m, norm_types): return L(m.parameters())
    # Otherwise recurse into children, then optionally add this module's own bias.
    res = L(m.children()).map(norm_bias_params, with_bias=with_bias).concat()
    if with_bias and getattr(m, 'bias', None) is not None: res.append(m.bias)
    return res
# %% ../nbs/00_torch_core.ipynb 181
def batch_to_samples(b, max_n=10):
    "'Transposes' a batch to (at most `max_n`) samples"
    # A tensor batch becomes a list of its first `max_n` rows.
    if isinstance(b, Tensor): return retain_types(list(b[:max_n]), [b])
    else:
        # Recurse into each element, then zip so sample i pairs up across elements.
        res = L(b).map(partial(batch_to_samples,max_n=max_n))
        return retain_types(res.zip(), [b])
# %% ../nbs/00_torch_core.ipynb 183
@patch
def interp_1d(x:Tensor, xp, fp):
    "Same as `np.interp`"
    # Piecewise-linear interpolation: per-segment slope and intercept from (xp, fp).
    slopes = (fp[1:]-fp[:-1])/(xp[1:]-xp[:-1])
    incx = fp[:-1] - (slopes*xp[:-1])
    # For each query point, index of the segment it falls into, clamped into range
    # so points outside [xp[0], xp[-1]] extrapolate from the end segments.
    locs = (x[:,None]>=xp[None,:]).long().sum(1)-1
    locs = locs.clamp(0,len(slopes)-1)
    return slopes[locs]*x + incx[locs]
# %% ../nbs/00_torch_core.ipynb 185
@patch
def pca(x:Tensor, k=2):
    "Compute PCA of `x` with `k` dimensions."
    # Center the data, then project onto the top-k left singular vectors of x^T.
    x = x-torch.mean(x,0)
    # torch.svd is deprecated; torch.linalg.svd(full_matrices=False) returns the same
    # reduced U (its Vh is transposed relative to the old V, which is unused here).
    U,S,Vh = torch.linalg.svd(x.t(), full_matrices=False)
    return torch.mm(x,U[:,:k])
# %% ../nbs/00_torch_core.ipynb 186
def logit(x):
    "Logit of `x`, clamped to avoid inf."
    # Clamp away from 0 and 1 so the log stays finite.
    clamped = x.clamp(1e-7, 1 - 1e-7)
    return -(1/clamped - 1).log()
# %% ../nbs/00_torch_core.ipynb 187
def num_distrib():
    "Return the number of processes in distributed training (if applicable)."
    # Torch launchers export WORLD_SIZE; absent means not distributed.
    world_size = os.environ.get('WORLD_SIZE', 0)
    return int(world_size)
# %% ../nbs/00_torch_core.ipynb 188
def rank_distrib():
    "Return the distributed rank of this process (if applicable)."
    # Torch launchers export RANK; absent means rank 0 (single process / main proc).
    rank = os.environ.get('RANK', 0)
    return int(rank)
# %% ../nbs/00_torch_core.ipynb 189
def distrib_barrier():
    "Place a synchronization barrier in distributed training"
    # Only meaningful with more than one initialized process.
    if num_distrib() <= 1: return
    if torch.distributed.is_initialized(): torch.distributed.barrier()
# %% ../nbs/00_torch_core.ipynb 191
# Saving arrays requires pytables - optional dependency
try: import tables
except: pass
# %% ../nbs/00_torch_core.ipynb 192
# Blosc compression filter for pytables, e.g. 'blosc:lz4' at the given level.
def _comp_filter(lib='lz4',lvl=3): return tables.Filters(complib=f'blosc:{lib}', complevel=lvl)
# %% ../nbs/00_torch_core.ipynb 193
@patch
def save_array(p:Path, o, complib='lz4', lvl=3):
    "Save numpy array to a compressed `pytables` file, using compression level `lvl`"
    if isinstance(o,Tensor): o = to_np(o)
    # Stored as a single compressed carray named 'data' at the file root.
    with tables.open_file(p, mode='w', filters=_comp_filter(lib=complib,lvl=lvl)) as f: f.create_carray('/', 'data', obj=o)
# %% ../nbs/00_torch_core.ipynb 195
@patch
def load_array(p:Path):
    # Docstring fixed: this reads back what `save_array` wrote; it does not save.
    "Load a numpy array from a `pytables` file"
    with tables.open_file(p, 'r') as f: return f.root.data.read()
# %% ../nbs/00_torch_core.ipynb 196
def base_doc(elt):
    "Print a base documentation of `elt`"
    # Prefer the qualified name, fall back to the plain name, then to empty.
    name = getattr(elt, '__qualname__', getattr(elt, '__name__', ''))
    print(f'{name}{inspect.signature(elt)}\n{inspect.getdoc(elt)}\n')
    print('To get a prettier result with hyperlinks to source code and documentation, install nbdev: pip install nbdev')
# %% ../nbs/00_torch_core.ipynb 197
def doc(elt):
    "Try to use doc from nbdev and fall back to `base_doc`"
    try:
        from nbdev.showdoc import doc as nbdev_doc
        nbdev_doc(elt)
    except:
        # nbdev missing (or failed): plain-text fallback.
        base_doc(elt)
# %% ../nbs/00_torch_core.ipynb 198
def nested_reorder(t, idxs):
    "Reorder all tensors in `t` using `idxs`"
    # Tensors and fastcore Ls support fancy indexing directly.
    if isinstance(t, (Tensor,L)): return t[idxs]
    # Tuples/lists are rebuilt with each element reordered recursively.
    if is_listy(t): return type(t)(nested_reorder(item, idxs) for item in t)
    if t is None: return t
    raise TypeError(f"Expected tensor, tuple, list or L but got {type(t)}")
# %% ../nbs/00_torch_core.ipynb 200
def flatten_check(inp, targ):
    "Check that `inp` and `targ` have the same number of elements and flatten them."
    flat_inp = TensorBase(inp.contiguous()).view(-1)
    flat_targ = TensorBase(targ.contiguous()).view(-1)
    test_eq(len(flat_inp), len(flat_targ))
    return flat_inp,flat_targ
# %% ../nbs/00_torch_core.ipynb 203
def make_cross_image(bw=True):
    "Create a tensor containing a cross image, either `bw` (True) or color"
    if bw:
        cross = torch.zeros(5, 5)
        cross[2, :] = 1.
        cross[:, 2] = 1.
        return cross
    cross = torch.zeros(3, 5, 5)
    cross[0, 2, :] = 1.  # horizontal bar in channel 0
    cross[1, :, 2] = 1.  # vertical bar in channel 1
    return cross
# %% ../nbs/00_torch_core.ipynb 206
def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):
    "Display batch `b` in a grid of size `items` with `cols` width"
    if items<cols: cols=items
    # Ceiling division so every item gets a cell.
    rows = (items+cols-1) // cols
    if figsize is None: figsize = (cols*3, rows*3)
    fig,axs = plt.subplots(rows, cols, figsize=figsize)
    # NOTE(review): with rows==cols==1, plt.subplots returns a bare Axes with no
    # `.flatten()` — confirm callers always request more than one item.
    for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)
# %% ../nbs/00_torch_core.ipynb 209
def requires_grad(m):
    "Check if the first parameter of `m` requires grad or not"
    # Modules without parameters report False.
    first_param = next(m.parameters(), None)
    return False if first_param is None else first_param.requires_grad
# %% ../nbs/00_torch_core.ipynb 211
def init_default(m, func=nn.init.kaiming_normal_):
    "Initialize `m` weights with `func` and set `bias` to 0."
    if not func: return m
    if hasattr(m, 'weight'): func(m.weight)
    # Some biases are None (e.g. bias=False layers); only touch real tensors.
    if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    return m
# %% ../nbs/00_torch_core.ipynb 213
def cond_init(m, func):
    "Apply `init_default` to `m` unless it's a batchnorm module"
    if isinstance(m, norm_types): return
    if requires_grad(m): init_default(m, func)
# %% ../nbs/00_torch_core.ipynb 215
def apply_leaf(m, f):
    "Apply `f` to children of `m`."
    # Grab the children iterator before `f` runs, matching the original traversal order.
    kids = m.children()
    if isinstance(m, nn.Module):
        f(m)
    for kid in kids:
        apply_leaf(kid, f)
# %% ../nbs/00_torch_core.ipynb 217
def apply_init(m, func=nn.init.kaiming_normal_):
    "Initialize all non-batchnorm layers of `m` with `func`."
    initializer = partial(cond_init, func=func)
    apply_leaf(m, initializer)
# %% ../nbs/00_torch_core.ipynb 220
def script_use_ctx(f):
    "Decorator: create jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
    sf = torch.jit.script(f)
    # The wrapper receives autograd's ctx and forwards the saved tensors after `*args`.
    def _f(ctx, *args, **kwargs): return sf(*args, *ctx.saved_variables, **kwargs)
    return update_wrapper(_f,f)
# %% ../nbs/00_torch_core.ipynb 221
def script_save_ctx(static, *argidx):
    "Decorator: create jit script and save args with indices `argidx` using `ctx.save_for_backward`"
    def _dec(f):
        sf = torch.jit.script(f)
        def _f(ctx, *args, **kwargs):
            if argidx:
                # Stash the requested positional args for the backward pass.
                save = [args[o] for o in argidx]
                ctx.save_for_backward(*save)
            # With no `argidx`, `f` expects `ctx` itself as its first argument.
            # `args` is a tuple, so build a tuple (the old `[ctx]+args` raised TypeError).
            if not argidx: args = (ctx, *args)
            return sf(*args, **kwargs)
        if static: _f = staticmethod(_f)
        return update_wrapper(_f,f)
    return _dec
# %% ../nbs/00_torch_core.ipynb 222
def script_fwd(*argidx):
    "Decorator: create static jit script and save args with indices `argidx` using `ctx.save_for_backward`"
    # Forward-pass variant of `script_save_ctx`: always a staticmethod.
    return script_save_ctx(True, *argidx)
# %% ../nbs/00_torch_core.ipynb 223
def script_bwd(f):
    "Decorator: create static jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
    # Backward-pass counterpart of `script_fwd`, built on `script_use_ctx`.
    return staticmethod(script_use_ctx(f))
# %% ../nbs/00_torch_core.ipynb 224
def grad_module(cls):
    "Decorator: convert `cls` into an autograd function"
    # Wrap `cls.apply` (the autograd Function entry point) in a forwarding Module.
    class _c(nn.Module):
        def forward(self, *args, **kwargs):
            return cls.apply(*args, **kwargs)
    return _c
# %% ../nbs/00_torch_core.ipynb 226
def ismin_torch(min_version):
    "Check if `torch.__version__` >= `min_version` using packaging.version"
    # `_torch_version` is pre-parsed elsewhere in this module's imports.
    return _torch_version >= parse(min_version)
# %% ../nbs/00_torch_core.ipynb 227
def notmax_torch(max_version):
    "Check if `torch.__version__` < `max_version` using packaging.version"
    return _torch_version < parse(max_version)
# %% ../nbs/00_torch_core.ipynb 229
# PyTorch 1.13 introduced a Tensor Subclass string formatting bug
# Workaround from pending PyTorch PR: https://github.com/pytorch/pytorch/pull/82766
if ismin_torch('1.13') and notmax_torch('1.14'):
    from torch.overrides import has_torch_function_unary, handle_torch_function
    @patch
    def __format__(self:Tensor, format_spec):
        # Defer to any __torch_function__ override first.
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
        # 0-dim concrete tensors format via their Python scalar value.
        if self.dim() == 0 and not self.is_meta and issubclass(type(self), Tensor):
            return self.item().__format__(format_spec)
        return object.__format__(self, format_spec)
| 36,527 | 39.40708 | 168 | py |
fastai | fastai-master/fastai/torch_basics.py | from torch import multiprocessing
import platform,os
if platform.system()=='Darwin':
# Python 3.8 changed to 'spawn' but that doesn't work with PyTorch DataLoader w n_workers>0
multiprocessing.set_start_method('fork', force=True)
# workaround "OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized"
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
from .imports import *
from .torch_imports import *
from .torch_core import *
from .layers import *
from .losses import *
| 516 | 35.928571 | 106 | py |
fastai | fastai-master/fastai/torch_imports.py | import pandas as pd
import torch
from torch import as_tensor,Tensor,ByteTensor,LongTensor,FloatTensor,HalfTensor,DoubleTensor
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import SequentialSampler,RandomSampler,Sampler,BatchSampler
from torch.utils.data import IterableDataset,get_worker_info
from torch.utils.data._utils.collate import default_collate,default_convert
| 400 | 39.1 | 92 | py |
fastai | fastai-master/fastai/learner.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/13a_learner.ipynb.
# %% ../nbs/13a_learner.ipynb 2
from __future__ import annotations
from .data.all import *
from .optimizer import *
from .callback.core import *
import pickle,threading
from collections.abc import MutableSequence
# %% auto 0
__all__ = ['replacing_yield', 'mk_metric', 'save_model', 'load_model', 'SkipToEpoch', 'Learner', 'before_batch_cb',
'load_learner', 'Metric', 'AvgMetric', 'AvgLoss', 'AvgSmoothLoss', 'ValueMetric', 'Recorder', 'CastToTensor',
'CancelBackwardException', 'CancelStepException', 'CancelFitException', 'CancelEpochException',
'CancelTrainException', 'CancelValidException', 'CancelBatchException']
# %% ../nbs/13a_learner.ipynb 4
_all_ = ['CancelBackwardException', 'CancelStepException','CancelFitException','CancelEpochException',
'CancelTrainException','CancelValidException','CancelBatchException']
# %% ../nbs/13a_learner.ipynb 10
defaults.lr = 1e-3
# %% ../nbs/13a_learner.ipynb 11
def replacing_yield(o, attr, val):
    "Context manager to temporarily replace an attribute"
    saved = getattr(o, attr)
    try:
        # Swap in the new value for the duration of the `with` body.
        setattr(o, attr, val)
        yield
    finally:
        setattr(o, attr, saved)
# %% ../nbs/13a_learner.ipynb 13
def mk_metric(m):
    "Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
    # Instantiate if a class was passed rather than an instance.
    if isinstance(m, type): m = m()
    if isinstance(m, Metric): return m
    return AvgMetric(m)
# %% ../nbs/13a_learner.ipynb 15
def save_model(file, model, opt, with_opt=True, pickle_protocol=2, **torch_save_kwargs):
    "Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
    if rank_distrib(): return # don't save if child proc
    if opt is None: with_opt=False
    # Unwrap (Distributed)DataParallel before grabbing the state dict.
    state = get_model(model).state_dict()
    if with_opt: state = {'model': state, 'opt':opt.state_dict()}
    torch.save(state, file, pickle_protocol=pickle_protocol, **torch_save_kwargs)
# %% ../nbs/13a_learner.ipynb 17
def load_model(file, model, opt, with_opt=True, device=None, strict=True, **torch_load_kwargs):
    "Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
    if isinstance(device, int): device = torch.device('cuda', device)
    elif device is None: device = 'cpu'
    state = torch.load(file, map_location=device, **torch_load_kwargs)
    # Files written by `save_model(..., with_opt=True)` hold exactly these two keys.
    hasopt = set(state)=={'model', 'opt'}
    model_state = state['model'] if hasopt else state
    get_model(model).load_state_dict(model_state, strict=strict)
    if hasopt and with_opt:
        # `with_opt` is known True here, so warn unconditionally on failure
        # (the old nested `if with_opt:` was dead code).
        try: opt.load_state_dict(state['opt'])
        except: warn("Could not load the optimizer state.")
    elif with_opt: warn("Saved file doesn't contain an optimizer state.")  # typo "filed" fixed
# %% ../nbs/13a_learner.ipynb 19
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# %% ../nbs/13a_learner.ipynb 20
_before_epoch = [event.before_fit, event.before_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# %% ../nbs/13a_learner.ipynb 21
class _ConstantFunc():
"Returns a function that returns `o`"
def __init__(self, o): self.o = o
def __call__(self, *args, **kwargs): return self.o
# %% ../nbs/13a_learner.ipynb 22
class SkipToEpoch(Callback):
    "Skip training up to `epoch`"
    order = 70  # run late so other callbacks see the epoch before it is cancelled
    def __init__(self, epoch:int):
        self._skip_to = epoch
    def before_epoch(self):
        # Cancel every epoch below the target; `fit` still advances the epoch counter.
        if self.epoch < self._skip_to:
            raise CancelEpochException
# %% ../nbs/13a_learner.ipynb 24
_loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train',
'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step',
'after_step', 'after_cancel_batch', 'after_batch','End Batch Loop','End Train',
'after_cancel_train', 'after_train', 'Start Valid', 'before_validate','Start Batch Loop',
'**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate',
'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit',
'after_cancel_fit', 'after_fit']
# %% ../nbs/13a_learner.ipynb 25
class Learner(GetAttr):
    # GetAttr delegation: unknown attribute lookups fall through to `self.model`.
    _default='model'
    def __init__(self,
        dls:DataLoaders, # `DataLoaders` containing fastai or PyTorch `DataLoader`s
        model:callable, # PyTorch model for training or inference
        loss_func:callable|None=None, # Loss function. Defaults to `dls` loss
        opt_func:Optimizer|OptimWrapper=Adam, # Optimization function for training
        lr:float|slice=defaults.lr, # Default learning rate
        splitter:callable=trainable_params, # Split model into parameter groups. Defaults to one parameter group
        cbs:Callback|MutableSequence|None=None, # `Callback`s to add to `Learner`
        metrics:callable|MutableSequence|None=None, # `Metric`s to calculate on validation set
        path:str|Path|None=None, # Parent directory to save, load, and export models. Defaults to `dls` `path`
        model_dir:str|Path='models', # Subdirectory to save and load models
        wd:float|int|None=None, # Default weight decay
        wd_bn_bias:bool=False, # Apply weight decay to normalization and bias parameters
        train_bn:bool=True, # Train frozen normalization layers
        moms:tuple=(0.95,0.85,0.95), # Default momentum for schedulers
        default_cbs:bool=True # Include default `Callback`s
    ):
        path = Path(path) if path is not None else getattr(dls, 'path', Path('.'))
        if loss_func is None:
            loss_func = getattr(dls.train_ds, 'loss_func', None)
            assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
        self.dls,self.model = dls,model
        store_attr(but='dls,model,cbs')
        self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
        if default_cbs: self.add_cbs(L(defaults.callbacks))
        self.add_cbs(cbs)
        # Guards concurrent validation/prediction (see `validation_context`).
        self.lock = threading.Lock()
        self("after_create")
    # Metrics are always stored wrapped as `Metric` instances via `mk_metric`.
    @property
    def metrics(self): return self._metrics
    @metrics.setter
    def metrics(self,v): self._metrics = L(v).map(mk_metric)
    def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls))
    def add_cbs(self, cbs):
        L(cbs).map(self.add_cb)
        return self
    def remove_cbs(self, cbs):
        L(cbs).map(self.remove_cb)
        return self
    def add_cb(self, cb):
        if isinstance(cb, type): cb = cb()
        # Two-way link: the callback knows its learner, and the learner exposes
        # the callback as an attribute under `cb.name`.
        cb.learn = self
        setattr(self, cb.name, cb)
        self.cbs.append(cb)
        return self
    def remove_cb(self, cb):
        # A class removes every instance of that class; an instance removes itself.
        if isinstance(cb, type): self.remove_cbs(self._grab_cbs(cb))
        else:
            cb.learn = None
            if hasattr(self, cb.name): delattr(self, cb.name)
            if cb in self.cbs: self.cbs.remove(cb)
        return self
    @contextmanager
    def added_cbs(self, cbs):
        self.add_cbs(cbs)
        try: yield
        finally: self.remove_cbs(cbs)
    @contextmanager
    def removed_cbs(self, cbs):
        self.remove_cbs(cbs)
        try: yield self
        finally: self.add_cbs(cbs)
    def ordered_cbs(self, event): return [cb for cb in self.cbs.sorted('order') if hasattr(cb, event)]
    # Calling the learner with an event name dispatches it to all callbacks in order.
    def __call__(self, event_name): L(event_name).map(self._call_one)
    def _call_one(self, event_name):
        if not hasattr(event, event_name): raise Exception(f'missing {event_name}')
        for cb in self.cbs.sorted('order'): cb(event_name)
    def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
    def create_opt(self):
        # A partial opt_func with an explicit lr overrides the learner's default lr.
        if isinstance(self.opt_func, partial):
            if 'lr' in self.opt_func.keywords:
                self.lr = self.opt_func.keywords['lr']
        if isinstance(self.opt_func, OptimWrapper):
            self.opt = self.opt_func
            self.opt.clear_state()
        else:
            self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
        if not self.wd_bn_bias:
            # Exempt normalization and bias parameters from weight decay.
            for p in self._bn_bias_state(True ): p['do_wd'] = False
        if self.train_bn:
            # Keep normalization layers training even when their group is frozen.
            for p in self._bn_bias_state(False): p['force_train'] = True
    def _split(self, b):
        # Split a batch into inputs (first n_inp elements) and targets (the rest).
        i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
        self.xb,self.yb = b[:i],b[i:]
    def _with_events(self, f, event_type, ex, final=noop):
        # Wrap `f` with before/after events; the matching Cancel exception skips to
        # after_cancel_{event_type}; `after_{event_type}` and `final` always run.
        try: self(f'before_{event_type}'); f()
        except ex: self(f'after_cancel_{event_type}')
        self(f'after_{event_type}'); final()
    def all_batches(self):
        self.n_iter = len(self.dl)
        for o in enumerate(self.dl): self.one_batch(*o)
    def _backward(self): self.loss_grad.backward()
    def _step(self): self.opt.step()
    def _do_grad_opt(self):
        self._with_events(self._backward, 'backward', CancelBackwardException)
        self._with_events(self._step, 'step', CancelStepException)
        self.opt.zero_grad()
    def _do_one_batch(self):
        self.pred = self.model(*self.xb)
        self('after_pred')
        if len(self.yb):
            # `loss_grad` keeps the graph for backward; `loss` is a detachable copy.
            self.loss_grad = self.loss_func(self.pred, *self.yb)
            self.loss = self.loss_grad.clone()
        self('after_loss')
        if not self.training or not len(self.yb): return
        self._do_grad_opt()
    def _set_device(self, b):
        # Prefer the dls device when it matches the model; otherwise follow the model.
        model_device = next(self.model.parameters()).device
        dls_device = getattr(self.dls, 'device', default_device())
        if model_device == dls_device: return to_device(b, dls_device)
        else: return to_device(b, model_device)
    def one_batch(self, i, b):
        self.iter = i
        b = self._set_device(b)
        self._split(b)
        self._with_events(self._do_one_batch, 'batch', CancelBatchException)
    def _do_epoch_train(self):
        self.dl = self.dls.train
        self._with_events(self.all_batches, 'train', CancelTrainException)
    def _do_epoch_validate(self, ds_idx=1, dl=None):
        if dl is None: dl = self.dls[ds_idx]
        self.dl = dl
        with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException)
    def _do_epoch(self):
        self._do_epoch_train()
        self._do_epoch_validate()
    def _do_fit(self):
        for epoch in range(self.n_epoch):
            self.epoch=epoch
            self._with_events(self._do_epoch, 'epoch', CancelEpochException)
    def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False, start_epoch=0):
        if start_epoch != 0:
            cbs = L(cbs) + SkipToEpoch(start_epoch)
        with self.added_cbs(cbs):
            if reset_opt or not self.opt: self.create_opt()
            if wd is None: wd = self.wd
            if wd is not None: self.opt.set_hypers(wd=wd)
            self.opt.set_hypers(lr=self.lr if lr is None else lr)
            self.n_epoch = n_epoch
            self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
    def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
    # Using the learner as a context manager fires the epoch-bracketing events.
    def __enter__(self): self(_before_epoch); return self
    def __exit__(self, exc_type, exc_value, tb): self(_after_epoch)
    def validation_context(self, cbs=None, inner=False):
        # Silence logging/progress and serialize access while validating.
        cms = [self.no_logging(),self.no_mbar(), self.lock]
        if cbs: cms.append(self.added_cbs(cbs))
        if not inner: cms.append(self)
        return ContextManagers(cms)
    def validate(self, ds_idx=1, dl=None, cbs=None):
        if dl is None: dl = self.dls[ds_idx]
        with self.validation_context(cbs=cbs): self._do_epoch_validate(ds_idx, dl)
        return getattr(self, 'final_record', None)
    @delegates(GatherPredsCallback.__init__)
    def get_preds(self,
        ds_idx:int=1, # `DataLoader` to use for predictions if `dl` is None. 0: train. 1: valid
        dl=None, # `DataLoader` to use for predictions, defaults to `ds_idx=1` if None
        with_input:bool=False, # Return inputs with predictions
        with_decoded:bool=False, # Return decoded predictions
        with_loss:bool=False, # Return per item loss with predictions
        act=None, # Apply activation to predictions, defaults to `self.loss_func`'s activation
        inner:bool=False, # If False, create progress bar, show logger, use temporary `cbs`
        reorder:bool=True, # Reorder predictions on dataset indicies, if applicable
        cbs:Callback|MutableSequence|None=None, # Temporary `Callback`s to apply during prediction
        **kwargs
    )-> tuple:
        if dl is None: dl = self.dls[ds_idx].new(shuffle=False, drop_last=False)
        else:
            try: len(dl)
            except TypeError as e:
                raise TypeError(f"`dl` is {type(dl)} and doesn't have len(dl)")
            if isinstance(dl, DataLoader):
                if dl.drop_last: dl = dl.new(shuffle=False, drop_last=False)
        if reorder and hasattr(dl, 'get_idxs'):
            # Freeze the index order so predictions can be un-shuffled afterwards.
            idxs = dl.get_idxs()
            dl = dl.new(get_idxs = _ConstantFunc(idxs))
        cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
        ctx_mgrs = self.validation_context(cbs=L(cbs)+[cb], inner=inner)
        if with_loss: ctx_mgrs.append(self.loss_not_reduced())
        with ContextManagers(ctx_mgrs):
            self._do_epoch_validate(dl=dl)
            if act is None: act = getcallable(self.loss_func, 'activation')
            res = cb.all_tensors()
            pred_i = 1 if with_input else 0
            if res[pred_i] is not None:
                res[pred_i] = act(res[pred_i])
                if with_decoded: res.insert(pred_i+2, getcallable(self.loss_func, 'decodes')(res[pred_i]))
            if reorder and hasattr(dl, 'get_idxs'): res = nested_reorder(res, tensor(idxs).argsort())
            return tuple(res)
        # NOTE(review): unreachable — the `with` block above always returns (or raises).
        self._end_cleanup()
    def predict(self, item, rm_type_tfms=None, with_input=False):
        dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
        inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
        i = getattr(self.dls, 'n_inp', -1)
        inp = (inp,) if i==1 else tuplify(inp)
        dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0]
        dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
        res = dec_targ,dec_preds[0],preds[0]
        if with_input: res = (dec_inp,) + res
        return res
    def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
        b = dl.one_batch()
        _,_,preds = self.get_preds(dl=[b], with_decoded=True)
        dl.show_results(b, preds, max_n=max_n, **kwargs)
    def show_training_loop(self):
        # Pretty-print `_loop`, indenting between Start/End markers.
        indent = 0
        for s in _loop:
            if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
            elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
            else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
    @contextmanager
    def no_logging(self): return replacing_yield(self, 'logger', noop)
    @contextmanager
    def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
    @contextmanager
    def loss_not_reduced(self):
        if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
        else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
    def to_detach(self,b,cpu=True,gather=True):
        return self.dl.to_detach(b,cpu,gather) if hasattr(getattr(self,'dl',None),'to_detach') else to_detach(b,cpu,gather)
    # The threading.Lock cannot be pickled; drop it on save and recreate on load.
    def __getstate__(self): return {k:v for k,v in self.__dict__.items() if k!='lock'}
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.Lock()
# Convenience accessors for the current (detuplified) input and target batch.
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# %% ../nbs/13a_learner.ipynb 26
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
removed_cbs="Context manage that temporarily removes `cbs`",
ordered_cbs="List of `Callback`s, in order, for an `event` in the training loop",
create_opt="Create an optimizer with default hyper-parameters",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all the batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Prediction on `item`, fully decoded, loss function decoded and probabilities",
validation_context="A `ContextManagers` suitable for validation, with optional `cbs`",
show_results="Show some predictions on `ds_idx`-th dataset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
to_detach="Calls `to_detach` if `self.dl` provides a `.to_detach` function otherwise calls global `to_detach`",
__call__="Call `event_name` for all `Callback`s in `self.cbs`"
)
# %% ../nbs/13a_learner.ipynb 33
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# %% ../nbs/13a_learner.ipynb 88
def _before_batch_cb(f, self):
xb,yb = f(self, self.xb, self.yb)
self.learn.xb,self.learn.yb = xb,yb
# %% ../nbs/13a_learner.ipynb 89
def before_batch_cb(f):
    "Shortcut for creating a Callback on the `before_batch` event, which takes and returns `xb,yb`"
    # `f` receives (cb, xb, yb) and must return the new (xb, yb).
    return Callback(before_batch=partial(_before_batch_cb, f))
# %% ../nbs/13a_learner.ipynb 96
@patch
@delegates(save_model)
def save(self:Learner, file, **kwargs):
    "Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`"
    # Resolve `file` inside the learner's model directory with a .pth extension.
    file = join_path_file(file, self.path/self.model_dir, ext='.pth')
    save_model(file, self.model, getattr(self,'opt',None), **kwargs)
    return file
# %% ../nbs/13a_learner.ipynb 98
@patch
@delegates(load_model)
def load(self:Learner, file, device=None, **kwargs):
    "Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
    if device is None and hasattr(self.dls, 'device'): device = self.dls.device
    # An optimizer must exist so its state can be restored alongside the model.
    if self.opt is None: self.create_opt()
    file = join_path_file(file, self.path/self.model_dir, ext='.pth')
    # Sync distributed processes so no rank reads before the file is fully written.
    distrib_barrier()
    load_model(file, self.model, self.opt, device=device, **kwargs)
    return self
# %% ../nbs/13a_learner.ipynb 102
@patch
def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2):
    "Export the content of `self` without the items and the optimizer state for inference"
    if rank_distrib(): return # don't export if child proc
    self._end_cleanup()
    # Temporarily strip the data items and optimizer, pickle, then restore both.
    old_dbunch = self.dls
    self.dls = self.dls.new_empty()
    state = self.opt.state_dict() if self.opt is not None else None
    self.opt = None
    with warnings.catch_warnings():
        #To avoid the warning that come from PyTorch about model not being checked
        warnings.simplefilter("ignore")
        torch.save(self, self.path/fname, pickle_module=pickle_module, pickle_protocol=pickle_protocol)
    self.create_opt()
    if state is not None: self.opt.load_state_dict(state)
    self.dls = old_dbunch
# %% ../nbs/13a_learner.ipynb 104
def load_learner(fname, cpu=True, pickle_module=pickle):
    "Load a `Learner` object in `fname`, by default putting it on the `cpu`"
    distrib_barrier()
    map_loc = 'cpu' if cpu else default_device()
    try: res = torch.load(fname, map_location=map_loc, pickle_module=pickle_module)
    except AttributeError as e:
        # Fixed: the message previously contained a literal '\R' where a newline was intended.
        e.args = [f"Custom classes or functions exported with your `Learner` not available in namespace.\nRe-declare/import before loading:\n\t{e.args[0]}"]
        raise
    if cpu:
        res.dls.cpu()
        # Undo any mixed-precision/channels-last conversion for CPU inference.
        if hasattr(res, 'channels_last'): res = res.to_contiguous(to_fp32=True)
        elif hasattr(res, 'mixed_precision'): res = res.to_fp32()
        elif hasattr(res, 'non_native_mixed_precision'): res = res.to_non_native_fp32()
    return res
# %% ../nbs/13a_learner.ipynb 111
@docs
class Metric():
    "Blueprint for defining a metric"
    def reset(self): pass
    def accumulate(self, learn): pass
    @property
    def value(self): raise NotImplementedError
    @property
    def name(self): return class2attr(self, 'Metric')
    # Member docstrings are attached by fastcore's @docs from this mapping.
    _docs = dict(
        reset="Reset inner state to prepare for new computation",
        name="Name of the `Metric`, camel-cased and with Metric removed",
        accumulate="Use `learn` to update the state with new results",
        value="The value of the metric")
# %% ../nbs/13a_learner.ipynb 118
class AvgMetric(Metric):
    "Average the values of `func` taking into account potential different batch sizes"
    def __init__(self, func): self.func = func
    def reset(self): self.total,self.count = 0.,0
    def accumulate(self, learn):
        # Weight each batch's metric by its batch size for a correct overall mean.
        bs = find_bs(learn.yb)
        self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
        self.count += bs
    @property
    def value(self): return self.total/self.count if self.count != 0 else None
    # Unwrap functools.partial-style wrappers to report the underlying function name.
    @property
    def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# %% ../nbs/13a_learner.ipynb 122
class AvgLoss(Metric):
    "Average the losses taking into account potential different batch sizes"
    def reset(self):
        "Clear the running batch-weighted loss sum and element count"
        self.total = 0.
        self.count = 0
    def accumulate(self, learn):
        "Add the current batch's mean loss, weighted by the batch size"
        n = find_bs(learn.yb)
        self.total += learn.to_detach(learn.loss.mean()) * n
        self.count += n
    @property
    def value(self):
        "Batch-size-weighted mean loss, or `None` before anything was accumulated"
        return None if self.count == 0 else self.total/self.count
    @property
    def name(self): return "loss"
# %% ../nbs/13a_learner.ipynb 126
class AvgSmoothLoss(Metric):
    "Smooth average of the losses (exponentially weighted with `beta`)"
    def __init__(self, beta=0.98): self.beta = beta
    def reset(self):
        "Restart the moving average from zero"
        self.count = 0
        self.val = tensor(0.)
    def accumulate(self, learn):
        "Blend the current batch's mean loss into the moving average"
        self.count += 1
        batch_loss = to_detach(learn.loss.mean())
        # lerp(a, b, w) = a + w*(b - a): keep `beta` of the old value, mix in the new loss
        self.val = torch.lerp(batch_loss, self.val, self.beta)
    @property
    def value(self):
        # Bias-correct the EMA for the warm-up period (same debiasing as in Adam)
        return self.val/(1 - self.beta**self.count)
# %% ../nbs/13a_learner.ipynb 129
class ValueMetric(Metric):
    "Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`"
    def __init__(self, func, metric_name=None): store_attr('func, metric_name')
    @property
    def value(self):
        "Fetch the pre-computed value by calling `func`"
        return self.func()
    @property
    def name(self):
        "Explicit `metric_name` when given, otherwise `func`'s own name"
        return self.metric_name or self.func.__name__
# %% ../nbs/13a_learner.ipynb 133
from fastprogress.fastprogress import format_time
# %% ../nbs/13a_learner.ipynb 134
def _maybe_item(t):
t = t.value
try: return t.item()
except: return t
# %% ../nbs/13a_learner.ipynb 135
class Recorder(Callback):
    "Callback that registers statistics (lr, loss and metrics) during training"
    # Attributes included when the Learner's state is saved/restored
    _stateattrs=('lrs','iters','losses','values')
    # Removed before fetching predictions; order 50 runs this after most other callbacks
    remove_on_fetch,order = True,50
    def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
        store_attr('add_time,train_metrics,valid_metrics')
        # `loss` is the batch-size-weighted average; `smooth_loss` an exponential moving average
        self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
    def before_fit(self):
        "Prepare state for training"
        self.lrs,self.iters,self.losses,self.values = [],[],[],[]
        names = self.metrics.attrgot('name')
        # Build the header row of the log, depending on which metric sets are active
        if self.train_metrics and self.valid_metrics:
            names = L('loss') + names
            names = names.map('train_{}') + names.map('valid_{}')
        elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
        else: names = L('train_loss') + names
        if self.add_time: names.append('time')
        self.metric_names = 'epoch'+names
        self.smooth_loss.reset()
    def after_batch(self):
        "Update all metrics and records lr and smooth loss in training"
        if len(self.yb) == 0: return  # no targets (e.g. prediction batch): nothing to record
        mets = self._train_mets if self.training else self._valid_mets
        for met in mets: met.accumulate(self.learn)
        if not self.training: return
        # During training also track the last param group's lr and the smoothed loss
        self.lrs.append(self.opt.hypers[-1]['lr'])
        self.losses.append(self.smooth_loss.value)
        self.learn.smooth_loss = self.smooth_loss.value
    def before_epoch(self):
        "Set timer if `self.add_time=True`"
        self.cancel_train,self.cancel_valid = False,False
        if self.add_time: self.start_epoch = time.time()
        self.log = L(getattr(self, 'epoch', 0))  # each log row starts with the epoch number
    # One-line event handlers; their docstrings are attached separately via `add_docs`
    def before_train (self): self._train_mets[1:].map(Self.reset())
    def before_validate(self): self._valid_mets.map(Self.reset())
    def after_train (self): self.log += self._train_mets.map(_maybe_item)
    def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
    def after_cancel_train(self): self.cancel_train = True
    def after_cancel_validate(self): self.cancel_valid = True
    def after_epoch(self):
        "Store and log the loss/metric values"
        self.learn.final_record = self.log[1:].copy()  # drop the leading epoch number
        self.values.append(self.learn.final_record)
        if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
        self.logger(self.log)
        self.iters.append(self.smooth_loss.count)
    @property
    def _train_mets(self):
        # Metrics tracked during training: smoothed loss (+ metrics if requested)
        if getattr(self, 'cancel_train', False): return L()
        return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
    @property
    def _valid_mets(self):
        # Metrics tracked during validation: plain average loss (+ metrics if requested)
        if getattr(self, 'cancel_valid', False): return L()
        return (L(self.loss) + self.metrics if self.valid_metrics else L())
    def plot_loss(self, skip_start=5, with_valid=True):
        plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
        if with_valid:
            # Only plot validation points recorded after `skip_start` training iterations
            idx = (np.array(self.iters)<skip_start).sum()
            valid_col = self.metric_names.index('valid_loss') - 1
            plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(valid_col), label='valid')
            plt.legend()
# %% ../nbs/13a_learner.ipynb 136
# Attach docstrings to Recorder's short one-line event handlers.
# Fix: the `after_train` text referenced a nonexistent attribute `self.training_metrics`;
# the actual attribute (see `Recorder.__init__`) is `train_metrics`.
add_docs(Recorder,
         before_train = "Reset loss and metrics state",
         after_train = "Log loss and metric values on the training set (if `self.train_metrics=True`)",
         before_validate = "Reset loss and metrics state",
         after_validate = "Log loss and metric values on the validation set",
         after_cancel_train = "Ignore training metrics for this epoch",
         after_cancel_validate = "Ignore validation metrics for this epoch",
         plot_loss = "Plot the losses from `skip_start` and onward")
# Recorder is part of the default callbacks added to every Learner
if Recorder not in defaults.callbacks: defaults.callbacks.append(Recorder)
# %% ../nbs/13a_learner.ipynb 152
def _cast_tensor(x):
if isinstance(x, tuple): return tuple(_cast_tensor(x_) for x_ in x)
else: return cast(x, Tensor) if isinstance(x,torch.Tensor) else x
# %% ../nbs/13a_learner.ipynb 153
class CastToTensor(Callback):
    "Cast Subclassed Tensors to `Tensor`"
    order=9 # Right before MixedPrecision
    def before_batch(self):
        "Replace `xb`/`yb` with versions whose tensors are plain `Tensor`s"
        self.learn.xb = _cast_tensor(self.learn.xb)
        self.learn.yb = _cast_tensor(self.learn.yb)
# %% ../nbs/13a_learner.ipynb 155
# Register CastToTensor in the default callback set (runs for every Learner)
if CastToTensor not in defaults.callbacks: defaults.callbacks.append(CastToTensor)
# %% ../nbs/13a_learner.ipynb 185
@patch
def freeze_to(self:Learner, n):
    # Freezing is implemented on the optimizer's parameter groups, so make sure one exists
    if self.opt is None: self.create_opt()
    self.opt.freeze_to(n)
    self.opt.clear_state()  # discard per-parameter optimizer state after regrouping
@patch
def freeze(self:Learner): self.freeze_to(-1)  # only the last parameter group stays trainable
@patch
def unfreeze(self:Learner): self.freeze_to(0)  # make every parameter group trainable
add_docs(Learner,
         freeze_to="Freeze parameter groups up to `n`",
         freeze="Freeze up to last parameter group",
         unfreeze="Unfreeze the entire model")
# %% ../nbs/13a_learner.ipynb 189
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
    "Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
    # `n`: number of augmented passes; `beta`: lerp weight between augmented and plain
    # predictions (None returns both); `use_max`: elementwise max instead of average.
    if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
    if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
    try:
        self(_before_epoch)
        # split_idx 0 selects training-time transforms, i.e. augmentations are applied
        with dl.dataset.set_split_idx(0), self.no_mbar():
            if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
            aug_preds = []
            for i in self.progress.mbar if hasattr(self,'progress') else range(n):
                self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
                aug_preds.append(self.get_preds(dl=dl, inner=True)[0][None])
            aug_preds = torch.cat(aug_preds)
            # Collapse the `n` augmented passes into a single prediction per item
            aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
            self.epoch = n
            # split_idx 1 selects validation-time transforms (no augmentation) for the plain pass
            with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(dl=dl, inner=True)
    finally: self(event.after_fit)  # always restore callback state, even on error
    if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
    # torch.lerp(a, b, w) = a + w*(b - a): `beta` weights the plain (non-augmented) predictions
    preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
    return preds,targs
| 29,917 | 43.388724 | 155 | py |
fastai | fastai-master/fastai/_modidx.py | # Autogenerated by nbdev
d = { 'settings': { 'branch': 'master',
'doc_baseurl': '/',
'doc_host': 'https://docs.fast.ai',
'git_url': 'https://github.com/fastai/fastai',
'lib_path': 'fastai'},
'syms': { 'fastai.basics': {},
'fastai.callback.all': {},
'fastai.callback.azureml': {},
'fastai.callback.captum': { 'fastai.callback.captum.CaptumInterpretation': ( 'callback.captum.html#captuminterpretation',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation.__init__': ( 'callback.captum.html#captuminterpretation.__init__',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation._get_attributions': ( 'callback.captum.html#captuminterpretation._get_attributions',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation._get_enc_dec_data': ( 'callback.captum.html#captuminterpretation._get_enc_dec_data',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation._viz': ( 'callback.captum.html#captuminterpretation._viz',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation.get_baseline_img': ( 'callback.captum.html#captuminterpretation.get_baseline_img',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation.insights': ( 'callback.captum.html#captuminterpretation.insights',
'fastai/callback/captum.py'),
'fastai.callback.captum.CaptumInterpretation.visualize': ( 'callback.captum.html#captuminterpretation.visualize',
'fastai/callback/captum.py'),
'fastai.callback.captum.json_clean': ( 'callback.captum.html#json_clean',
'fastai/callback/captum.py')},
'fastai.callback.channelslast': { 'fastai.callback.channelslast.ChannelsLast': ( 'callback.channelslast.html#channelslast',
'fastai/callback/channelslast.py'),
'fastai.callback.channelslast.ChannelsLast.before_fit': ( 'callback.channelslast.html#channelslast.before_fit',
'fastai/callback/channelslast.py'),
'fastai.callback.channelslast.Learner.to_channelslast': ( 'callback.channelslast.html#learner.to_channelslast',
'fastai/callback/channelslast.py'),
'fastai.callback.channelslast.Learner.to_contiguous': ( 'callback.channelslast.html#learner.to_contiguous',
'fastai/callback/channelslast.py')},
'fastai.callback.comet': { 'fastai.callback.comet.CometCallback': ( 'callback.comet.html#cometcallback',
'fastai/callback/comet.py'),
'fastai.callback.comet.CometCallback.__init__': ( 'callback.comet.html#cometcallback.__init__',
'fastai/callback/comet.py'),
'fastai.callback.comet.CometCallback.after_batch': ( 'callback.comet.html#cometcallback.after_batch',
'fastai/callback/comet.py'),
'fastai.callback.comet.CometCallback.after_epoch': ( 'callback.comet.html#cometcallback.after_epoch',
'fastai/callback/comet.py'),
'fastai.callback.comet.CometCallback.after_fit': ( 'callback.comet.html#cometcallback.after_fit',
'fastai/callback/comet.py'),
'fastai.callback.comet.CometCallback.before_fit': ( 'callback.comet.html#cometcallback.before_fit',
'fastai/callback/comet.py')},
'fastai.callback.core': { 'fastai.callback.core.Callback': ('callback.core.html#callback', 'fastai/callback/core.py'),
'fastai.callback.core.Callback.__call__': ( 'callback.core.html#callback.__call__',
'fastai/callback/core.py'),
'fastai.callback.core.Callback.__init__': ( 'callback.core.html#callback.__init__',
'fastai/callback/core.py'),
'fastai.callback.core.Callback.__repr__': ( 'callback.core.html#callback.__repr__',
'fastai/callback/core.py'),
'fastai.callback.core.Callback.__setattr__': ( 'callback.core.html#callback.__setattr__',
'fastai/callback/core.py'),
'fastai.callback.core.Callback.name': ('callback.core.html#callback.name', 'fastai/callback/core.py'),
'fastai.callback.core.FetchPredsCallback': ( 'callback.core.html#fetchpredscallback',
'fastai/callback/core.py'),
'fastai.callback.core.FetchPredsCallback.__init__': ( 'callback.core.html#fetchpredscallback.__init__',
'fastai/callback/core.py'),
'fastai.callback.core.FetchPredsCallback.after_validate': ( 'callback.core.html#fetchpredscallback.after_validate',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback': ( 'callback.core.html#gatherpredscallback',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.__init__': ( 'callback.core.html#gatherpredscallback.__init__',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.after_batch': ( 'callback.core.html#gatherpredscallback.after_batch',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.after_validate': ( 'callback.core.html#gatherpredscallback.after_validate',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.all_tensors': ( 'callback.core.html#gatherpredscallback.all_tensors',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.before_batch': ( 'callback.core.html#gatherpredscallback.before_batch',
'fastai/callback/core.py'),
'fastai.callback.core.GatherPredsCallback.before_validate': ( 'callback.core.html#gatherpredscallback.before_validate',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback': ( 'callback.core.html#trainevalcallback',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback.after_batch': ( 'callback.core.html#trainevalcallback.after_batch',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback.after_create': ( 'callback.core.html#trainevalcallback.after_create',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback.before_fit': ( 'callback.core.html#trainevalcallback.before_fit',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback.before_train': ( 'callback.core.html#trainevalcallback.before_train',
'fastai/callback/core.py'),
'fastai.callback.core.TrainEvalCallback.before_validate': ( 'callback.core.html#trainevalcallback.before_validate',
'fastai/callback/core.py')},
'fastai.callback.data': { 'fastai.callback.data.CollectDataCallback': ( 'callback.data.html#collectdatacallback',
'fastai/callback/data.py'),
'fastai.callback.data.CollectDataCallback.after_batch': ( 'callback.data.html#collectdatacallback.after_batch',
'fastai/callback/data.py'),
'fastai.callback.data.CollectDataCallback.before_fit': ( 'callback.data.html#collectdatacallback.before_fit',
'fastai/callback/data.py'),
'fastai.callback.data.DataBlock.weighted_dataloaders': ( 'callback.data.html#datablock.weighted_dataloaders',
'fastai/callback/data.py'),
'fastai.callback.data.Datasets.weighted_dataloaders': ( 'callback.data.html#datasets.weighted_dataloaders',
'fastai/callback/data.py'),
'fastai.callback.data.FilteredBase.partial_dataloaders': ( 'callback.data.html#filteredbase.partial_dataloaders',
'fastai/callback/data.py'),
'fastai.callback.data.PartialDL': ('callback.data.html#partialdl', 'fastai/callback/data.py'),
'fastai.callback.data.PartialDL.__init__': ( 'callback.data.html#partialdl.__init__',
'fastai/callback/data.py'),
'fastai.callback.data.PartialDL.__len__': ( 'callback.data.html#partialdl.__len__',
'fastai/callback/data.py'),
'fastai.callback.data.PartialDL.get_idxs': ( 'callback.data.html#partialdl.get_idxs',
'fastai/callback/data.py'),
'fastai.callback.data.WeightedDL': ('callback.data.html#weighteddl', 'fastai/callback/data.py'),
'fastai.callback.data.WeightedDL.__init__': ( 'callback.data.html#weighteddl.__init__',
'fastai/callback/data.py'),
'fastai.callback.data.WeightedDL.get_idxs': ( 'callback.data.html#weighteddl.get_idxs',
'fastai/callback/data.py')},
'fastai.callback.fp16': { 'fastai.callback.fp16.FP16TestCallback': ( 'callback.fp16.html#fp16testcallback',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.FP16TestCallback.after_pred': ( 'callback.fp16.html#fp16testcallback.after_pred',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.Learner.to_fp16': ( 'callback.fp16.html#learner.to_fp16',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.Learner.to_fp32': ( 'callback.fp16.html#learner.to_fp32',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.Learner.to_non_native_fp16': ( 'callback.fp16.html#learner.to_non_native_fp16',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.Learner.to_non_native_fp32': ( 'callback.fp16.html#learner.to_non_native_fp32',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision': ( 'callback.fp16.html#mixedprecision',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.__init__': ( 'callback.fp16.html#mixedprecision.__init__',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.after_fit': ( 'callback.fp16.html#mixedprecision.after_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.after_loss': ( 'callback.fp16.html#mixedprecision.after_loss',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.after_pred': ( 'callback.fp16.html#mixedprecision.after_pred',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.after_step': ( 'callback.fp16.html#mixedprecision.after_step',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.before_backward': ( 'callback.fp16.html#mixedprecision.before_backward',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.before_batch': ( 'callback.fp16.html#mixedprecision.before_batch',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.before_fit': ( 'callback.fp16.html#mixedprecision.before_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.before_step': ( 'callback.fp16.html#mixedprecision.before_step',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.param_groups': ( 'callback.fp16.html#mixedprecision.param_groups',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.MixedPrecision.step': ( 'callback.fp16.html#mixedprecision.step',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.ModelToHalf': ('callback.fp16.html#modeltohalf', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.ModelToHalf.after_fit': ( 'callback.fp16.html#modeltohalf.after_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.ModelToHalf.before_fit': ( 'callback.fp16.html#modeltohalf.before_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision': ( 'callback.fp16.html#nonnativemixedprecision',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.__init__': ( 'callback.fp16.html#nonnativemixedprecision.__init__',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.after_batch': ( 'callback.fp16.html#nonnativemixedprecision.after_batch',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.after_fit': ( 'callback.fp16.html#nonnativemixedprecision.after_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.after_pred': ( 'callback.fp16.html#nonnativemixedprecision.after_pred',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.after_step': ( 'callback.fp16.html#nonnativemixedprecision.after_step',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.before_backward': ( 'callback.fp16.html#nonnativemixedprecision.before_backward',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.before_batch': ( 'callback.fp16.html#nonnativemixedprecision.before_batch',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.before_fit': ( 'callback.fp16.html#nonnativemixedprecision.before_fit',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.NonNativeMixedPrecision.before_step': ( 'callback.fp16.html#nonnativemixedprecision.before_step',
'fastai/callback/fp16.py'),
'fastai.callback.fp16._copy_state': ('callback.fp16.html#_copy_state', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.copy_clone': ('callback.fp16.html#copy_clone', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.get_master': ('callback.fp16.html#get_master', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.grad_overflow': ('callback.fp16.html#grad_overflow', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.test_overflow': ('callback.fp16.html#test_overflow', 'fastai/callback/fp16.py'),
'fastai.callback.fp16.to_master_grads': ( 'callback.fp16.html#to_master_grads',
'fastai/callback/fp16.py'),
'fastai.callback.fp16.to_model_params': ( 'callback.fp16.html#to_model_params',
'fastai/callback/fp16.py')},
'fastai.callback.hook': { 'fastai.callback.hook.ActivationStats': ( 'callback.hook.html#activationstats',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.__init__': ( 'callback.hook.html#activationstats.__init__',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats._flatten_tuple': ( 'callback.hook.html#activationstats._flatten_tuple',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.after_batch': ( 'callback.hook.html#activationstats.after_batch',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.before_fit': ( 'callback.hook.html#activationstats.before_fit',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.color_dim': ( 'callback.hook.html#activationstats.color_dim',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.hist': ( 'callback.hook.html#activationstats.hist',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.hook': ( 'callback.hook.html#activationstats.hook',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.hook_multi_ouput': ( 'callback.hook.html#activationstats.hook_multi_ouput',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.layer_stats': ( 'callback.hook.html#activationstats.layer_stats',
'fastai/callback/hook.py'),
'fastai.callback.hook.ActivationStats.plot_layer_stats': ( 'callback.hook.html#activationstats.plot_layer_stats',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hook': ('callback.hook.html#hook', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hook.__enter__': ( 'callback.hook.html#hook.__enter__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hook.__exit__': ('callback.hook.html#hook.__exit__', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hook.__init__': ('callback.hook.html#hook.__init__', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hook.hook_fn': ('callback.hook.html#hook.hook_fn', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hook.remove': ('callback.hook.html#hook.remove', 'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback': ('callback.hook.html#hookcallback', 'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.__del__': ( 'callback.hook.html#hookcallback.__del__',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.__init__': ( 'callback.hook.html#hookcallback.__init__',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback._register': ( 'callback.hook.html#hookcallback._register',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback._remove': ( 'callback.hook.html#hookcallback._remove',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.after_batch': ( 'callback.hook.html#hookcallback.after_batch',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.after_fit': ( 'callback.hook.html#hookcallback.after_fit',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.before_batch': ( 'callback.hook.html#hookcallback.before_batch',
'fastai/callback/hook.py'),
'fastai.callback.hook.HookCallback.before_fit': ( 'callback.hook.html#hookcallback.before_fit',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks': ('callback.hook.html#hooks', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__enter__': ( 'callback.hook.html#hooks.__enter__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__exit__': ( 'callback.hook.html#hooks.__exit__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__getitem__': ( 'callback.hook.html#hooks.__getitem__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__init__': ( 'callback.hook.html#hooks.__init__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__iter__': ( 'callback.hook.html#hooks.__iter__',
'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.__len__': ('callback.hook.html#hooks.__len__', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.remove': ('callback.hook.html#hooks.remove', 'fastai/callback/hook.py'),
'fastai.callback.hook.Hooks.stored': ('callback.hook.html#hooks.stored', 'fastai/callback/hook.py'),
'fastai.callback.hook.Learner.summary': ( 'callback.hook.html#learner.summary',
'fastai/callback/hook.py'),
'fastai.callback.hook._get_shapes': ('callback.hook.html#_get_shapes', 'fastai/callback/hook.py'),
'fastai.callback.hook._hook_inner': ('callback.hook.html#_hook_inner', 'fastai/callback/hook.py'),
'fastai.callback.hook._print_shapes': ('callback.hook.html#_print_shapes', 'fastai/callback/hook.py'),
'fastai.callback.hook.dummy_eval': ('callback.hook.html#dummy_eval', 'fastai/callback/hook.py'),
'fastai.callback.hook.has_params': ('callback.hook.html#has_params', 'fastai/callback/hook.py'),
'fastai.callback.hook.hook_output': ('callback.hook.html#hook_output', 'fastai/callback/hook.py'),
'fastai.callback.hook.hook_outputs': ('callback.hook.html#hook_outputs', 'fastai/callback/hook.py'),
'fastai.callback.hook.layer_info': ('callback.hook.html#layer_info', 'fastai/callback/hook.py'),
'fastai.callback.hook.model_sizes': ('callback.hook.html#model_sizes', 'fastai/callback/hook.py'),
'fastai.callback.hook.module_summary': ( 'callback.hook.html#module_summary',
'fastai/callback/hook.py'),
'fastai.callback.hook.num_features_model': ( 'callback.hook.html#num_features_model',
'fastai/callback/hook.py'),
'fastai.callback.hook.total_params': ('callback.hook.html#total_params', 'fastai/callback/hook.py')},
'fastai.callback.mixup': { 'fastai.callback.mixup.CutMix': ('callback.mixup.html#cutmix', 'fastai/callback/mixup.py'),
'fastai.callback.mixup.CutMix.__init__': ( 'callback.mixup.html#cutmix.__init__',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.CutMix.before_batch': ( 'callback.mixup.html#cutmix.before_batch',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.CutMix.rand_bbox': ( 'callback.mixup.html#cutmix.rand_bbox',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler': ('callback.mixup.html#mixhandler', 'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.__init__': ( 'callback.mixup.html#mixhandler.__init__',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.after_cancel_fit': ( 'callback.mixup.html#mixhandler.after_cancel_fit',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.after_cancel_train': ( 'callback.mixup.html#mixhandler.after_cancel_train',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.after_train': ( 'callback.mixup.html#mixhandler.after_train',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.before_train': ( 'callback.mixup.html#mixhandler.before_train',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixHandler.lf': ( 'callback.mixup.html#mixhandler.lf',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixUp': ('callback.mixup.html#mixup', 'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixUp.__init__': ( 'callback.mixup.html#mixup.__init__',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.MixUp.before_batch': ( 'callback.mixup.html#mixup.before_batch',
'fastai/callback/mixup.py'),
'fastai.callback.mixup.reduce_loss': ( 'callback.mixup.html#reduce_loss',
'fastai/callback/mixup.py')},
'fastai.callback.neptune': { 'fastai.callback.neptune.NeptuneCallback': ( 'callback.neptune.html#neptunecallback',
'fastai/callback/neptune.py'),
'fastai.callback.neptune.NeptuneCallback.__init__': ( 'callback.neptune.html#neptunecallback.__init__',
'fastai/callback/neptune.py'),
'fastai.callback.neptune.NeptuneCallback.after_batch': ( 'callback.neptune.html#neptunecallback.after_batch',
'fastai/callback/neptune.py'),
'fastai.callback.neptune.NeptuneCallback.after_epoch': ( 'callback.neptune.html#neptunecallback.after_epoch',
'fastai/callback/neptune.py'),
'fastai.callback.neptune.NeptuneCallback.after_fit': ( 'callback.neptune.html#neptunecallback.after_fit',
'fastai/callback/neptune.py'),
'fastai.callback.neptune.NeptuneCallback.before_fit': ( 'callback.neptune.html#neptunecallback.before_fit',
'fastai/callback/neptune.py')},
'fastai.callback.preds': { 'fastai.callback.preds.MCDropoutCallback': ( 'callback.preds.html#mcdropoutcallback',
'fastai/callback/preds.py'),
'fastai.callback.preds.MCDropoutCallback.after_validate': ( 'callback.preds.html#mcdropoutcallback.after_validate',
'fastai/callback/preds.py'),
'fastai.callback.preds.MCDropoutCallback.before_validate': ( 'callback.preds.html#mcdropoutcallback.before_validate',
'fastai/callback/preds.py')},
'fastai.callback.progress': { 'fastai.callback.progress.CSVLogger': ( 'callback.progress.html#csvlogger',
'fastai/callback/progress.py'),
'fastai.callback.progress.CSVLogger.__init__': ( 'callback.progress.html#csvlogger.__init__',
'fastai/callback/progress.py'),
'fastai.callback.progress.CSVLogger._write_line': ( 'callback.progress.html#csvlogger._write_line',
'fastai/callback/progress.py'),
'fastai.callback.progress.CSVLogger.after_fit': ( 'callback.progress.html#csvlogger.after_fit',
'fastai/callback/progress.py'),
'fastai.callback.progress.CSVLogger.before_fit': ( 'callback.progress.html#csvlogger.before_fit',
'fastai/callback/progress.py'),
'fastai.callback.progress.CSVLogger.read_log': ( 'callback.progress.html#csvlogger.read_log',
'fastai/callback/progress.py'),
'fastai.callback.progress.Learner.no_bar': ( 'callback.progress.html#learner.no_bar',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback': ( 'callback.progress.html#progresscallback',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback._launch_pbar': ( 'callback.progress.html#progresscallback._launch_pbar',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback._write_stats': ( 'callback.progress.html#progresscallback._write_stats',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.after_batch': ( 'callback.progress.html#progresscallback.after_batch',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.after_fit': ( 'callback.progress.html#progresscallback.after_fit',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.after_train': ( 'callback.progress.html#progresscallback.after_train',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.after_validate': ( 'callback.progress.html#progresscallback.after_validate',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.before_epoch': ( 'callback.progress.html#progresscallback.before_epoch',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.before_fit': ( 'callback.progress.html#progresscallback.before_fit',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.before_train': ( 'callback.progress.html#progresscallback.before_train',
'fastai/callback/progress.py'),
'fastai.callback.progress.ProgressCallback.before_validate': ( 'callback.progress.html#progresscallback.before_validate',
'fastai/callback/progress.py'),
'fastai.callback.progress.ShowGraphCallback': ( 'callback.progress.html#showgraphcallback',
'fastai/callback/progress.py'),
'fastai.callback.progress.ShowGraphCallback.after_epoch': ( 'callback.progress.html#showgraphcallback.after_epoch',
'fastai/callback/progress.py'),
'fastai.callback.progress.ShowGraphCallback.after_train': ( 'callback.progress.html#showgraphcallback.after_train',
'fastai/callback/progress.py'),
'fastai.callback.progress.ShowGraphCallback.before_fit': ( 'callback.progress.html#showgraphcallback.before_fit',
'fastai/callback/progress.py')},
'fastai.callback.rnn': { 'fastai.callback.rnn.ModelResetter': ('callback.rnn.html#modelresetter', 'fastai/callback/rnn.py'),
'fastai.callback.rnn.ModelResetter.after_fit': ( 'callback.rnn.html#modelresetter.after_fit',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.ModelResetter.before_train': ( 'callback.rnn.html#modelresetter.before_train',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.ModelResetter.before_validate': ( 'callback.rnn.html#modelresetter.before_validate',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.RNNCallback': ('callback.rnn.html#rnncallback', 'fastai/callback/rnn.py'),
'fastai.callback.rnn.RNNCallback.after_pred': ( 'callback.rnn.html#rnncallback.after_pred',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.RNNRegularizer': ('callback.rnn.html#rnnregularizer', 'fastai/callback/rnn.py'),
'fastai.callback.rnn.RNNRegularizer.__init__': ( 'callback.rnn.html#rnnregularizer.__init__',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.RNNRegularizer.after_loss': ( 'callback.rnn.html#rnnregularizer.after_loss',
'fastai/callback/rnn.py'),
'fastai.callback.rnn.rnn_cbs': ('callback.rnn.html#rnn_cbs', 'fastai/callback/rnn.py')},
'fastai.callback.schedule': { 'fastai.callback.schedule.LRFinder': ( 'callback.schedule.html#lrfinder',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.__init__': ( 'callback.schedule.html#lrfinder.__init__',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.after_batch': ( 'callback.schedule.html#lrfinder.after_batch',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.after_fit': ( 'callback.schedule.html#lrfinder.after_fit',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.before_batch': ( 'callback.schedule.html#lrfinder.before_batch',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.before_fit': ( 'callback.schedule.html#lrfinder.before_fit',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.LRFinder.before_validate': ( 'callback.schedule.html#lrfinder.before_validate',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Learner.fine_tune': ( 'callback.schedule.html#learner.fine_tune',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Learner.fit_flat_cos': ( 'callback.schedule.html#learner.fit_flat_cos',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Learner.fit_one_cycle': ( 'callback.schedule.html#learner.fit_one_cycle',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Learner.fit_sgdr': ( 'callback.schedule.html#learner.fit_sgdr',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Learner.lr_find': ( 'callback.schedule.html#learner.lr_find',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler': ( 'callback.schedule.html#paramscheduler',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler.__init__': ( 'callback.schedule.html#paramscheduler.__init__',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler._update_val': ( 'callback.schedule.html#paramscheduler._update_val',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler.after_batch': ( 'callback.schedule.html#paramscheduler.after_batch',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler.after_fit': ( 'callback.schedule.html#paramscheduler.after_fit',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler.before_batch': ( 'callback.schedule.html#paramscheduler.before_batch',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.ParamScheduler.before_fit': ( 'callback.schedule.html#paramscheduler.before_fit',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Recorder.plot_lr_find': ( 'callback.schedule.html#recorder.plot_lr_find',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.Recorder.plot_sched': ( 'callback.schedule.html#recorder.plot_sched',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.SchedCos': ( 'callback.schedule.html#schedcos',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.SchedExp': ( 'callback.schedule.html#schedexp',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.SchedLin': ( 'callback.schedule.html#schedlin',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.SchedNo': ( 'callback.schedule.html#schedno',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.SchedPoly': ( 'callback.schedule.html#schedpoly',
'fastai/callback/schedule.py'),
'fastai.callback.schedule._Annealer': ( 'callback.schedule.html#_annealer',
'fastai/callback/schedule.py'),
'fastai.callback.schedule._Annealer.__call__': ( 'callback.schedule.html#_annealer.__call__',
'fastai/callback/schedule.py'),
'fastai.callback.schedule._Annealer.__init__': ( 'callback.schedule.html#_annealer.__init__',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.annealer': ( 'callback.schedule.html#annealer',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.combine_scheds': ( 'callback.schedule.html#combine_scheds',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.combined_cos': ( 'callback.schedule.html#combined_cos',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.minimum': ( 'callback.schedule.html#minimum',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.sched_cos': ( 'callback.schedule.html#sched_cos',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.sched_exp': ( 'callback.schedule.html#sched_exp',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.sched_lin': ( 'callback.schedule.html#sched_lin',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.sched_no': ( 'callback.schedule.html#sched_no',
'fastai/callback/schedule.py'),
'fastai.callback.schedule.slide': ('callback.schedule.html#slide', 'fastai/callback/schedule.py'),
'fastai.callback.schedule.steep': ('callback.schedule.html#steep', 'fastai/callback/schedule.py'),
'fastai.callback.schedule.valley': ( 'callback.schedule.html#valley',
'fastai/callback/schedule.py')},
'fastai.callback.tensorboard': { 'fastai.callback.tensorboard.TensorBoardBaseCallback': ( 'callback.tensorboard.html#tensorboardbasecallback',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback.__del__': ( 'callback.tensorboard.html#tensorboardbasecallback.__del__',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback.__init__': ( 'callback.tensorboard.html#tensorboardbasecallback.__init__',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback._remove': ( 'callback.tensorboard.html#tensorboardbasecallback._remove',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback._setup_projector': ( 'callback.tensorboard.html#tensorboardbasecallback._setup_projector',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback._setup_writer': ( 'callback.tensorboard.html#tensorboardbasecallback._setup_writer',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback.after_fit': ( 'callback.tensorboard.html#tensorboardbasecallback.after_fit',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback.after_pred': ( 'callback.tensorboard.html#tensorboardbasecallback.after_pred',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardBaseCallback.after_validate': ( 'callback.tensorboard.html#tensorboardbasecallback.after_validate',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback': ( 'callback.tensorboard.html#tensorboardcallback',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback.__init__': ( 'callback.tensorboard.html#tensorboardcallback.__init__',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback.after_batch': ( 'callback.tensorboard.html#tensorboardcallback.after_batch',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback.after_epoch': ( 'callback.tensorboard.html#tensorboardcallback.after_epoch',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback.before_fit': ( 'callback.tensorboard.html#tensorboardcallback.before_fit',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardCallback.before_validate': ( 'callback.tensorboard.html#tensorboardcallback.before_validate',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardProjectorCallback': ( 'callback.tensorboard.html#tensorboardprojectorcallback',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardProjectorCallback.__init__': ( 'callback.tensorboard.html#tensorboardprojectorcallback.__init__',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardProjectorCallback.before_fit': ( 'callback.tensorboard.html#tensorboardprojectorcallback.before_fit',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.TensorBoardProjectorCallback.before_validate': ( 'callback.tensorboard.html#tensorboardprojectorcallback.before_validate',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard._add_projector_features': ( 'callback.tensorboard.html#_add_projector_features',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard._get_embeddings': ( 'callback.tensorboard.html#_get_embeddings',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard._normalize_for_projector': ( 'callback.tensorboard.html#_normalize_for_projector',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard._write_projector_embedding': ( 'callback.tensorboard.html#_write_projector_embedding',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.projector_word_embeddings': ( 'callback.tensorboard.html#projector_word_embeddings',
'fastai/callback/tensorboard.py'),
'fastai.callback.tensorboard.tensorboard_log': ( 'callback.tensorboard.html#tensorboard_log',
'fastai/callback/tensorboard.py')},
'fastai.callback.tracker': { 'fastai.callback.tracker.EarlyStoppingCallback': ( 'callback.tracker.html#earlystoppingcallback',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.EarlyStoppingCallback.__init__': ( 'callback.tracker.html#earlystoppingcallback.__init__',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.EarlyStoppingCallback.after_epoch': ( 'callback.tracker.html#earlystoppingcallback.after_epoch',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.EarlyStoppingCallback.before_fit': ( 'callback.tracker.html#earlystoppingcallback.before_fit',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.ReduceLROnPlateau': ( 'callback.tracker.html#reducelronplateau',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.ReduceLROnPlateau.__init__': ( 'callback.tracker.html#reducelronplateau.__init__',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.ReduceLROnPlateau.after_epoch': ( 'callback.tracker.html#reducelronplateau.after_epoch',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.ReduceLROnPlateau.before_fit': ( 'callback.tracker.html#reducelronplateau.before_fit',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.SaveModelCallback': ( 'callback.tracker.html#savemodelcallback',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.SaveModelCallback.__init__': ( 'callback.tracker.html#savemodelcallback.__init__',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.SaveModelCallback._save': ( 'callback.tracker.html#savemodelcallback._save',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.SaveModelCallback.after_epoch': ( 'callback.tracker.html#savemodelcallback.after_epoch',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.SaveModelCallback.after_fit': ( 'callback.tracker.html#savemodelcallback.after_fit',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TerminateOnNaNCallback': ( 'callback.tracker.html#terminateonnancallback',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TerminateOnNaNCallback.after_batch': ( 'callback.tracker.html#terminateonnancallback.after_batch',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TrackerCallback': ( 'callback.tracker.html#trackercallback',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TrackerCallback.__init__': ( 'callback.tracker.html#trackercallback.__init__',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TrackerCallback.after_epoch': ( 'callback.tracker.html#trackercallback.after_epoch',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TrackerCallback.after_fit': ( 'callback.tracker.html#trackercallback.after_fit',
'fastai/callback/tracker.py'),
'fastai.callback.tracker.TrackerCallback.before_fit': ( 'callback.tracker.html#trackercallback.before_fit',
'fastai/callback/tracker.py')},
'fastai.callback.training': { 'fastai.callback.training.BnFreeze': ( 'callback.training.html#bnfreeze',
'fastai/callback/training.py'),
'fastai.callback.training.BnFreeze.before_train': ( 'callback.training.html#bnfreeze.before_train',
'fastai/callback/training.py'),
'fastai.callback.training.GradientAccumulation': ( 'callback.training.html#gradientaccumulation',
'fastai/callback/training.py'),
'fastai.callback.training.GradientAccumulation.__init__': ( 'callback.training.html#gradientaccumulation.__init__',
'fastai/callback/training.py'),
'fastai.callback.training.GradientAccumulation.after_loss': ( 'callback.training.html#gradientaccumulation.after_loss',
'fastai/callback/training.py'),
'fastai.callback.training.GradientAccumulation.before_fit': ( 'callback.training.html#gradientaccumulation.before_fit',
'fastai/callback/training.py'),
'fastai.callback.training.GradientAccumulation.before_step': ( 'callback.training.html#gradientaccumulation.before_step',
'fastai/callback/training.py'),
'fastai.callback.training.GradientClip': ( 'callback.training.html#gradientclip',
'fastai/callback/training.py'),
'fastai.callback.training.GradientClip.__init__': ( 'callback.training.html#gradientclip.__init__',
'fastai/callback/training.py'),
'fastai.callback.training.GradientClip.before_step': ( 'callback.training.html#gradientclip.before_step',
'fastai/callback/training.py'),
'fastai.callback.training.ShortEpochCallback': ( 'callback.training.html#shortepochcallback',
'fastai/callback/training.py'),
'fastai.callback.training.ShortEpochCallback.__init__': ( 'callback.training.html#shortepochcallback.__init__',
'fastai/callback/training.py'),
'fastai.callback.training.ShortEpochCallback.after_batch': ( 'callback.training.html#shortepochcallback.after_batch',
'fastai/callback/training.py'),
'fastai.callback.training.set_bn_eval': ( 'callback.training.html#set_bn_eval',
'fastai/callback/training.py')},
'fastai.callback.wandb': { 'fastai.callback.wandb.Learner.gather_args': ( 'callback.wandb.html#learner.gather_args',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback': ( 'callback.wandb.html#wandbcallback',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.__init__': ( 'callback.wandb.html#wandbcallback.__init__',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.after_batch': ( 'callback.wandb.html#wandbcallback.after_batch',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.after_create': ( 'callback.wandb.html#wandbcallback.after_create',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.after_epoch': ( 'callback.wandb.html#wandbcallback.after_epoch',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.after_fit': ( 'callback.wandb.html#wandbcallback.after_fit',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.before_batch': ( 'callback.wandb.html#wandbcallback.before_batch',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.before_fit': ( 'callback.wandb.html#wandbcallback.before_fit',
'fastai/callback/wandb.py'),
'fastai.callback.wandb.WandbCallback.log_predictions': ( 'callback.wandb.html#wandbcallback.log_predictions',
'fastai/callback/wandb.py'),
'fastai.callback.wandb._format_config': ( 'callback.wandb.html#_format_config',
'fastai/callback/wandb.py'),
'fastai.callback.wandb._format_config_value': ( 'callback.wandb.html#_format_config_value',
'fastai/callback/wandb.py'),
'fastai.callback.wandb._format_metadata': ( 'callback.wandb.html#_format_metadata',
'fastai/callback/wandb.py'),
'fastai.callback.wandb._make_plt': ('callback.wandb.html#_make_plt', 'fastai/callback/wandb.py'),
'fastai.callback.wandb._unlist': ('callback.wandb.html#_unlist', 'fastai/callback/wandb.py'),
'fastai.callback.wandb.log_dataset': ('callback.wandb.html#log_dataset', 'fastai/callback/wandb.py'),
'fastai.callback.wandb.log_model': ('callback.wandb.html#log_model', 'fastai/callback/wandb.py'),
'fastai.callback.wandb.wandb_process': ( 'callback.wandb.html#wandb_process',
'fastai/callback/wandb.py')},
'fastai.collab': { 'fastai.collab.CollabDataLoaders': ('collab.html#collabdataloaders', 'fastai/collab.py'),
'fastai.collab.CollabDataLoaders.from_csv': ('collab.html#collabdataloaders.from_csv', 'fastai/collab.py'),
'fastai.collab.CollabDataLoaders.from_df': ('collab.html#collabdataloaders.from_df', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias': ('collab.html#embeddingdotbias', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias.__init__': ('collab.html#embeddingdotbias.__init__', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias._get_idx': ('collab.html#embeddingdotbias._get_idx', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias.bias': ('collab.html#embeddingdotbias.bias', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias.forward': ('collab.html#embeddingdotbias.forward', 'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias.from_classes': ( 'collab.html#embeddingdotbias.from_classes',
'fastai/collab.py'),
'fastai.collab.EmbeddingDotBias.weight': ('collab.html#embeddingdotbias.weight', 'fastai/collab.py'),
'fastai.collab.EmbeddingNN': ('collab.html#embeddingnn', 'fastai/collab.py'),
'fastai.collab.EmbeddingNN.__init__': ('collab.html#embeddingnn.__init__', 'fastai/collab.py'),
'fastai.collab.TabularCollab': ('collab.html#tabularcollab', 'fastai/collab.py'),
'fastai.collab.collab_learner': ('collab.html#collab_learner', 'fastai/collab.py')},
'fastai.data.all': {},
'fastai.data.block': { 'fastai.data.block.CategoryBlock': ('data.block.html#categoryblock', 'fastai/data/block.py'),
'fastai.data.block.DataBlock': ('data.block.html#datablock', 'fastai/data/block.py'),
'fastai.data.block.DataBlock.__init__': ('data.block.html#datablock.__init__', 'fastai/data/block.py'),
'fastai.data.block.DataBlock._combine_type_tfms': ( 'data.block.html#datablock._combine_type_tfms',
'fastai/data/block.py'),
'fastai.data.block.DataBlock.dataloaders': ( 'data.block.html#datablock.dataloaders',
'fastai/data/block.py'),
'fastai.data.block.DataBlock.datasets': ('data.block.html#datablock.datasets', 'fastai/data/block.py'),
'fastai.data.block.DataBlock.from_columns': ( 'data.block.html#datablock.from_columns',
'fastai/data/block.py'),
'fastai.data.block.DataBlock.new': ('data.block.html#datablock.new', 'fastai/data/block.py'),
'fastai.data.block.DataBlock.summary': ('data.block.html#datablock.summary', 'fastai/data/block.py'),
'fastai.data.block.MultiCategoryBlock': ('data.block.html#multicategoryblock', 'fastai/data/block.py'),
'fastai.data.block.RegressionBlock': ('data.block.html#regressionblock', 'fastai/data/block.py'),
'fastai.data.block.TransformBlock': ('data.block.html#transformblock', 'fastai/data/block.py'),
'fastai.data.block.TransformBlock.__init__': ( 'data.block.html#transformblock.__init__',
'fastai/data/block.py'),
'fastai.data.block._apply_pipeline': ('data.block.html#_apply_pipeline', 'fastai/data/block.py'),
'fastai.data.block._find_fail_collate': ('data.block.html#_find_fail_collate', 'fastai/data/block.py'),
'fastai.data.block._merge_grouper': ('data.block.html#_merge_grouper', 'fastai/data/block.py'),
'fastai.data.block._merge_tfms': ('data.block.html#_merge_tfms', 'fastai/data/block.py'),
'fastai.data.block._short_repr': ('data.block.html#_short_repr', 'fastai/data/block.py'),
'fastai.data.block._zip': ('data.block.html#_zip', 'fastai/data/block.py')},
'fastai.data.core': { 'fastai.data.core.DataLoaders': ('data.core.html#dataloaders', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.__getitem__': ( 'data.core.html#dataloaders.__getitem__',
'fastai/data/core.py'),
'fastai.data.core.DataLoaders.__init__': ('data.core.html#dataloaders.__init__', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.__len__': ('data.core.html#dataloaders.__len__', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders._add_tfms': ('data.core.html#dataloaders._add_tfms', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders._set': ('data.core.html#dataloaders._set', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.add_tfms': ('data.core.html#dataloaders.add_tfms', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.cpu': ('data.core.html#dataloaders.cpu', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.cuda': ('data.core.html#dataloaders.cuda', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.device': ('data.core.html#dataloaders.device', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.from_dblock': ( 'data.core.html#dataloaders.from_dblock',
'fastai/data/core.py'),
'fastai.data.core.DataLoaders.from_dsets': ( 'data.core.html#dataloaders.from_dsets',
'fastai/data/core.py'),
'fastai.data.core.DataLoaders.new_empty': ('data.core.html#dataloaders.new_empty', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.test_dl': ('data.core.html#dataloaders.test_dl', 'fastai/data/core.py'),
'fastai.data.core.DataLoaders.to': ('data.core.html#dataloaders.to', 'fastai/data/core.py'),
'fastai.data.core.Datasets': ('data.core.html#datasets', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__dir__': ('data.core.html#datasets.__dir__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__getattr__': ('data.core.html#datasets.__getattr__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__getitem__': ('data.core.html#datasets.__getitem__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__init__': ('data.core.html#datasets.__init__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__iter__': ('data.core.html#datasets.__iter__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__len__': ('data.core.html#datasets.__len__', 'fastai/data/core.py'),
'fastai.data.core.Datasets.__repr__': ('data.core.html#datasets.__repr__', 'fastai/data/core.py'),
'fastai.data.core.Datasets._new': ('data.core.html#datasets._new', 'fastai/data/core.py'),
'fastai.data.core.Datasets.decode': ('data.core.html#datasets.decode', 'fastai/data/core.py'),
'fastai.data.core.Datasets.items': ('data.core.html#datasets.items', 'fastai/data/core.py'),
'fastai.data.core.Datasets.new_empty': ('data.core.html#datasets.new_empty', 'fastai/data/core.py'),
'fastai.data.core.Datasets.overlapping_splits': ( 'data.core.html#datasets.overlapping_splits',
'fastai/data/core.py'),
'fastai.data.core.Datasets.set_split_idx': ( 'data.core.html#datasets.set_split_idx',
'fastai/data/core.py'),
'fastai.data.core.Datasets.show': ('data.core.html#datasets.show', 'fastai/data/core.py'),
'fastai.data.core.Datasets.split_idx': ('data.core.html#datasets.split_idx', 'fastai/data/core.py'),
'fastai.data.core.Datasets.splits': ('data.core.html#datasets.splits', 'fastai/data/core.py'),
'fastai.data.core.Datasets.subset': ('data.core.html#datasets.subset', 'fastai/data/core.py'),
'fastai.data.core.FilteredBase': ('data.core.html#filteredbase', 'fastai/data/core.py'),
'fastai.data.core.FilteredBase.__init__': ('data.core.html#filteredbase.__init__', 'fastai/data/core.py'),
'fastai.data.core.FilteredBase._new': ('data.core.html#filteredbase._new', 'fastai/data/core.py'),
'fastai.data.core.FilteredBase.dataloaders': ( 'data.core.html#filteredbase.dataloaders',
'fastai/data/core.py'),
'fastai.data.core.FilteredBase.n_subsets': ( 'data.core.html#filteredbase.n_subsets',
'fastai/data/core.py'),
'fastai.data.core.FilteredBase.subset': ('data.core.html#filteredbase.subset', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL': ('data.core.html#tfmddl', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.__init__': ('data.core.html#tfmddl.__init__', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL._decode_batch': ('data.core.html#tfmddl._decode_batch', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL._one_pass': ('data.core.html#tfmddl._one_pass', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL._pre_show_batch': ( 'data.core.html#tfmddl._pre_show_batch',
'fastai/data/core.py'),
'fastai.data.core.TfmdDL._retain_dl': ('data.core.html#tfmddl._retain_dl', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.before_iter': ('data.core.html#tfmddl.before_iter', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.decode': ('data.core.html#tfmddl.decode', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.decode_batch': ('data.core.html#tfmddl.decode_batch', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.n_inp': ('data.core.html#tfmddl.n_inp', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.new': ('data.core.html#tfmddl.new', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.show_batch': ('data.core.html#tfmddl.show_batch', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.show_results': ('data.core.html#tfmddl.show_results', 'fastai/data/core.py'),
'fastai.data.core.TfmdDL.to': ('data.core.html#tfmddl.to', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists': ('data.core.html#tfmdlists', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.__call__': ('data.core.html#tfmdlists.__call__', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.__getitem__': ('data.core.html#tfmdlists.__getitem__', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.__init__': ('data.core.html#tfmdlists.__init__', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.__iter__': ('data.core.html#tfmdlists.__iter__', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.__repr__': ('data.core.html#tfmdlists.__repr__', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists._after_item': ('data.core.html#tfmdlists._after_item', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists._new': ('data.core.html#tfmdlists._new', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.decode': ('data.core.html#tfmdlists.decode', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.infer': ('data.core.html#tfmdlists.infer', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.infer_idx': ('data.core.html#tfmdlists.infer_idx', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.new_empty': ('data.core.html#tfmdlists.new_empty', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.overlapping_splits': ( 'data.core.html#tfmdlists.overlapping_splits',
'fastai/data/core.py'),
'fastai.data.core.TfmdLists.setup': ('data.core.html#tfmdlists.setup', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.show': ('data.core.html#tfmdlists.show', 'fastai/data/core.py'),
'fastai.data.core.TfmdLists.subset': ('data.core.html#tfmdlists.subset', 'fastai/data/core.py'),
'fastai.data.core.decode_at': ('data.core.html#decode_at', 'fastai/data/core.py'),
'fastai.data.core.show_at': ('data.core.html#show_at', 'fastai/data/core.py'),
'fastai.data.core.show_batch': ('data.core.html#show_batch', 'fastai/data/core.py'),
'fastai.data.core.show_results': ('data.core.html#show_results', 'fastai/data/core.py'),
'fastai.data.core.test_set': ('data.core.html#test_set', 'fastai/data/core.py')},
'fastai.data.download_checks': {},
'fastai.data.external': { 'fastai.data.external.URLs': ('data.external.html#urls', 'fastai/data/external.py'),
'fastai.data.external.URLs.path': ('data.external.html#urls.path', 'fastai/data/external.py'),
'fastai.data.external.fastai_cfg': ('data.external.html#fastai_cfg', 'fastai/data/external.py'),
'fastai.data.external.fastai_path': ('data.external.html#fastai_path', 'fastai/data/external.py'),
'fastai.data.external.untar_data': ('data.external.html#untar_data', 'fastai/data/external.py')},
'fastai.data.load': { 'fastai.data.load.DataLoader': ('data.load.html#dataloader', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.__init__': ('data.load.html#dataloader.__init__', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.__iter__': ('data.load.html#dataloader.__iter__', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.__len__': ('data.load.html#dataloader.__len__', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.chunkify': ('data.load.html#dataloader.chunkify', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.create_batch': ( 'data.load.html#dataloader.create_batch',
'fastai/data/load.py'),
'fastai.data.load.DataLoader.create_batches': ( 'data.load.html#dataloader.create_batches',
'fastai/data/load.py'),
'fastai.data.load.DataLoader.create_item': ( 'data.load.html#dataloader.create_item',
'fastai/data/load.py'),
'fastai.data.load.DataLoader.do_batch': ('data.load.html#dataloader.do_batch', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.do_item': ('data.load.html#dataloader.do_item', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.get_idxs': ('data.load.html#dataloader.get_idxs', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.new': ('data.load.html#dataloader.new', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.one_batch': ('data.load.html#dataloader.one_batch', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.prebatched': ('data.load.html#dataloader.prebatched', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.randomize': ('data.load.html#dataloader.randomize', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.retain': ('data.load.html#dataloader.retain', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.sample': ('data.load.html#dataloader.sample', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.shuffle_fn': ('data.load.html#dataloader.shuffle_fn', 'fastai/data/load.py'),
'fastai.data.load.DataLoader.to': ('data.load.html#dataloader.to', 'fastai/data/load.py'),
'fastai.data.load.SkipItemException': ('data.load.html#skipitemexception', 'fastai/data/load.py'),
'fastai.data.load._FakeLoader': ('data.load.html#_fakeloader', 'fastai/data/load.py'),
'fastai.data.load._FakeLoader.__init__': ('data.load.html#_fakeloader.__init__', 'fastai/data/load.py'),
'fastai.data.load._FakeLoader.__iter__': ('data.load.html#_fakeloader.__iter__', 'fastai/data/load.py'),
'fastai.data.load._FakeLoader._fn_noops': ('data.load.html#_fakeloader._fn_noops', 'fastai/data/load.py'),
'fastai.data.load._FakeLoader.multiprocessing_context': ( 'data.load.html#_fakeloader.multiprocessing_context',
'fastai/data/load.py'),
'fastai.data.load._FakeLoader.no_multiproc': ( 'data.load.html#_fakeloader.no_multiproc',
'fastai/data/load.py'),
'fastai.data.load._wif': ('data.load.html#_wif', 'fastai/data/load.py'),
'fastai.data.load.collate_error': ('data.load.html#collate_error', 'fastai/data/load.py'),
'fastai.data.load.fa_collate': ('data.load.html#fa_collate', 'fastai/data/load.py'),
'fastai.data.load.fa_convert': ('data.load.html#fa_convert', 'fastai/data/load.py')},
'fastai.data.transforms': { 'fastai.data.transforms.AttrGetter': ( 'data.transforms.html#attrgetter',
'fastai/data/transforms.py'),
'fastai.data.transforms.AttrGetter.__init__': ( 'data.transforms.html#attrgetter.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.AttrGetter.encodes': ( 'data.transforms.html#attrgetter.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.Categorize': ( 'data.transforms.html#categorize',
'fastai/data/transforms.py'),
'fastai.data.transforms.Categorize.__init__': ( 'data.transforms.html#categorize.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.Categorize.decodes': ( 'data.transforms.html#categorize.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.Categorize.encodes': ( 'data.transforms.html#categorize.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.Categorize.setups': ( 'data.transforms.html#categorize.setups',
'fastai/data/transforms.py'),
'fastai.data.transforms.Category': ('data.transforms.html#category', 'fastai/data/transforms.py'),
'fastai.data.transforms.CategoryMap': ( 'data.transforms.html#categorymap',
'fastai/data/transforms.py'),
'fastai.data.transforms.CategoryMap.__eq__': ( 'data.transforms.html#categorymap.__eq__',
'fastai/data/transforms.py'),
'fastai.data.transforms.CategoryMap.__init__': ( 'data.transforms.html#categorymap.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.CategoryMap.map_ids': ( 'data.transforms.html#categorymap.map_ids',
'fastai/data/transforms.py'),
'fastai.data.transforms.CategoryMap.map_objs': ( 'data.transforms.html#categorymap.map_objs',
'fastai/data/transforms.py'),
'fastai.data.transforms.ColReader': ('data.transforms.html#colreader', 'fastai/data/transforms.py'),
'fastai.data.transforms.ColReader.__call__': ( 'data.transforms.html#colreader.__call__',
'fastai/data/transforms.py'),
'fastai.data.transforms.ColReader.__init__': ( 'data.transforms.html#colreader.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.ColReader._do_one': ( 'data.transforms.html#colreader._do_one',
'fastai/data/transforms.py'),
'fastai.data.transforms.ColSplitter': ( 'data.transforms.html#colsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.EncodedMultiCategorize': ( 'data.transforms.html#encodedmulticategorize',
'fastai/data/transforms.py'),
'fastai.data.transforms.EncodedMultiCategorize.__init__': ( 'data.transforms.html#encodedmulticategorize.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.EncodedMultiCategorize.decodes': ( 'data.transforms.html#encodedmulticategorize.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.EncodedMultiCategorize.encodes': ( 'data.transforms.html#encodedmulticategorize.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.EndSplitter': ( 'data.transforms.html#endsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.FileGetter': ( 'data.transforms.html#filegetter',
'fastai/data/transforms.py'),
'fastai.data.transforms.FileSplitter': ( 'data.transforms.html#filesplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.FuncSplitter': ( 'data.transforms.html#funcsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.GrandparentSplitter': ( 'data.transforms.html#grandparentsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.ImageGetter': ( 'data.transforms.html#imagegetter',
'fastai/data/transforms.py'),
'fastai.data.transforms.IndexSplitter': ( 'data.transforms.html#indexsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.IntToFloatTensor': ( 'data.transforms.html#inttofloattensor',
'fastai/data/transforms.py'),
'fastai.data.transforms.IntToFloatTensor.__init__': ( 'data.transforms.html#inttofloattensor.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.IntToFloatTensor.decodes': ( 'data.transforms.html#inttofloattensor.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.IntToFloatTensor.encodes': ( 'data.transforms.html#inttofloattensor.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.ItemGetter': ( 'data.transforms.html#itemgetter',
'fastai/data/transforms.py'),
'fastai.data.transforms.ItemGetter.__init__': ( 'data.transforms.html#itemgetter.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.ItemGetter.encodes': ( 'data.transforms.html#itemgetter.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.MaskSplitter': ( 'data.transforms.html#masksplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategorize': ( 'data.transforms.html#multicategorize',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategorize.__init__': ( 'data.transforms.html#multicategorize.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategorize.decodes': ( 'data.transforms.html#multicategorize.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategorize.encodes': ( 'data.transforms.html#multicategorize.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategorize.setups': ( 'data.transforms.html#multicategorize.setups',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategory': ( 'data.transforms.html#multicategory',
'fastai/data/transforms.py'),
'fastai.data.transforms.MultiCategory.show': ( 'data.transforms.html#multicategory.show',
'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize': ('data.transforms.html#normalize', 'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize.__init__': ( 'data.transforms.html#normalize.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize.decodes': ( 'data.transforms.html#normalize.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize.encodes': ( 'data.transforms.html#normalize.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize.from_stats': ( 'data.transforms.html#normalize.from_stats',
'fastai/data/transforms.py'),
'fastai.data.transforms.Normalize.setups': ( 'data.transforms.html#normalize.setups',
'fastai/data/transforms.py'),
'fastai.data.transforms.OneHotEncode': ( 'data.transforms.html#onehotencode',
'fastai/data/transforms.py'),
'fastai.data.transforms.OneHotEncode.__init__': ( 'data.transforms.html#onehotencode.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.OneHotEncode.decodes': ( 'data.transforms.html#onehotencode.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.OneHotEncode.encodes': ( 'data.transforms.html#onehotencode.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.OneHotEncode.setups': ( 'data.transforms.html#onehotencode.setups',
'fastai/data/transforms.py'),
'fastai.data.transforms.RandomSplitter': ( 'data.transforms.html#randomsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.RandomSubsetSplitter': ( 'data.transforms.html#randomsubsetsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegexLabeller': ( 'data.transforms.html#regexlabeller',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegexLabeller.__call__': ( 'data.transforms.html#regexlabeller.__call__',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegexLabeller.__init__': ( 'data.transforms.html#regexlabeller.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegressionSetup': ( 'data.transforms.html#regressionsetup',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegressionSetup.__init__': ( 'data.transforms.html#regressionsetup.__init__',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegressionSetup.decodes': ( 'data.transforms.html#regressionsetup.decodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegressionSetup.encodes': ( 'data.transforms.html#regressionsetup.encodes',
'fastai/data/transforms.py'),
'fastai.data.transforms.RegressionSetup.setups': ( 'data.transforms.html#regressionsetup.setups',
'fastai/data/transforms.py'),
'fastai.data.transforms.ToTensor': ('data.transforms.html#totensor', 'fastai/data/transforms.py'),
'fastai.data.transforms.TrainTestSplitter': ( 'data.transforms.html#traintestsplitter',
'fastai/data/transforms.py'),
'fastai.data.transforms._get_files': ( 'data.transforms.html#_get_files',
'fastai/data/transforms.py'),
'fastai.data.transforms._grandparent_idxs': ( 'data.transforms.html#_grandparent_idxs',
'fastai/data/transforms.py'),
'fastai.data.transforms.broadcast_vec': ( 'data.transforms.html#broadcast_vec',
'fastai/data/transforms.py'),
'fastai.data.transforms.get_c': ('data.transforms.html#get_c', 'fastai/data/transforms.py'),
'fastai.data.transforms.get_files': ('data.transforms.html#get_files', 'fastai/data/transforms.py'),
'fastai.data.transforms.get_image_files': ( 'data.transforms.html#get_image_files',
'fastai/data/transforms.py'),
'fastai.data.transforms.get_text_files': ( 'data.transforms.html#get_text_files',
'fastai/data/transforms.py'),
'fastai.data.transforms.parent_label': ( 'data.transforms.html#parent_label',
'fastai/data/transforms.py')},
'fastai.distributed': { 'fastai.distributed.DataParallel.reset': ( 'distributed.html#dataparallel.reset',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL': ('distributed.html#distributeddl', 'fastai/distributed.py'),
'fastai.distributed.DistributedDL.__init__': ( 'distributed.html#distributeddl.__init__',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.__len__': ( 'distributed.html#distributeddl.__len__',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL._broadcast': ( 'distributed.html#distributeddl._broadcast',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL._to_detach': ( 'distributed.html#distributeddl._to_detach',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.after_batch': ( 'distributed.html#distributeddl.after_batch',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.after_iter': ( 'distributed.html#distributeddl.after_iter',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.before_iter': ( 'distributed.html#distributeddl.before_iter',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.create_batches': ( 'distributed.html#distributeddl.create_batches',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.get_idxs': ( 'distributed.html#distributeddl.get_idxs',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.randomize': ( 'distributed.html#distributeddl.randomize',
'fastai/distributed.py'),
'fastai.distributed.DistributedDL.to_detach': ( 'distributed.html#distributeddl.to_detach',
'fastai/distributed.py'),
'fastai.distributed.DistributedDataParallel.reset': ( 'distributed.html#distributeddataparallel.reset',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer': ( 'distributed.html#distributedtrainer',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer.__init__': ( 'distributed.html#distributedtrainer.__init__',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer._backward': ( 'distributed.html#distributedtrainer._backward',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer._wrap_dl': ( 'distributed.html#distributedtrainer._wrap_dl',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer.after_fit': ( 'distributed.html#distributedtrainer.after_fit',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer.before_fit': ( 'distributed.html#distributedtrainer.before_fit',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer.before_train': ( 'distributed.html#distributedtrainer.before_train',
'fastai/distributed.py'),
'fastai.distributed.DistributedTrainer.before_validate': ( 'distributed.html#distributedtrainer.before_validate',
'fastai/distributed.py'),
'fastai.distributed.Learner.detach_distributed': ( 'distributed.html#learner.detach_distributed',
'fastai/distributed.py'),
'fastai.distributed.Learner.detach_parallel': ( 'distributed.html#learner.detach_parallel',
'fastai/distributed.py'),
'fastai.distributed.Learner.distrib_ctx': ( 'distributed.html#learner.distrib_ctx',
'fastai/distributed.py'),
'fastai.distributed.Learner.parallel_ctx': ( 'distributed.html#learner.parallel_ctx',
'fastai/distributed.py'),
'fastai.distributed.Learner.to_distributed': ( 'distributed.html#learner.to_distributed',
'fastai/distributed.py'),
'fastai.distributed.Learner.to_parallel': ( 'distributed.html#learner.to_parallel',
'fastai/distributed.py'),
'fastai.distributed.ParallelTrainer': ('distributed.html#paralleltrainer', 'fastai/distributed.py'),
'fastai.distributed.ParallelTrainer.__init__': ( 'distributed.html#paralleltrainer.__init__',
'fastai/distributed.py'),
'fastai.distributed.ParallelTrainer.after_fit': ( 'distributed.html#paralleltrainer.after_fit',
'fastai/distributed.py'),
'fastai.distributed.ParallelTrainer.before_fit': ( 'distributed.html#paralleltrainer.before_fit',
'fastai/distributed.py'),
'fastai.distributed._round_to_multiple': ( 'distributed.html#_round_to_multiple',
'fastai/distributed.py'),
'fastai.distributed.rank0_first': ('distributed.html#rank0_first', 'fastai/distributed.py'),
'fastai.distributed.setup_distrib': ('distributed.html#setup_distrib', 'fastai/distributed.py'),
'fastai.distributed.teardown_distrib': ('distributed.html#teardown_distrib', 'fastai/distributed.py')},
'fastai.fp16_utils': {},
'fastai.imports': {},
'fastai.interpret': { 'fastai.interpret.ClassificationInterpretation': ( 'interpret.html#classificationinterpretation',
'fastai/interpret.py'),
'fastai.interpret.ClassificationInterpretation.__init__': ( 'interpret.html#classificationinterpretation.__init__',
'fastai/interpret.py'),
'fastai.interpret.ClassificationInterpretation.confusion_matrix': ( 'interpret.html#classificationinterpretation.confusion_matrix',
'fastai/interpret.py'),
'fastai.interpret.ClassificationInterpretation.most_confused': ( 'interpret.html#classificationinterpretation.most_confused',
'fastai/interpret.py'),
'fastai.interpret.ClassificationInterpretation.plot_confusion_matrix': ( 'interpret.html#classificationinterpretation.plot_confusion_matrix',
'fastai/interpret.py'),
'fastai.interpret.ClassificationInterpretation.print_classification_report': ( 'interpret.html#classificationinterpretation.print_classification_report',
'fastai/interpret.py'),
'fastai.interpret.Interpretation': ('interpret.html#interpretation', 'fastai/interpret.py'),
'fastai.interpret.Interpretation.__getitem__': ( 'interpret.html#interpretation.__getitem__',
'fastai/interpret.py'),
'fastai.interpret.Interpretation.__init__': ( 'interpret.html#interpretation.__init__',
'fastai/interpret.py'),
'fastai.interpret.Interpretation.from_learner': ( 'interpret.html#interpretation.from_learner',
'fastai/interpret.py'),
'fastai.interpret.Interpretation.plot_top_losses': ( 'interpret.html#interpretation.plot_top_losses',
'fastai/interpret.py'),
'fastai.interpret.Interpretation.show_results': ( 'interpret.html#interpretation.show_results',
'fastai/interpret.py'),
'fastai.interpret.Interpretation.top_losses': ( 'interpret.html#interpretation.top_losses',
'fastai/interpret.py'),
'fastai.interpret.SegmentationInterpretation': ( 'interpret.html#segmentationinterpretation',
'fastai/interpret.py'),
'fastai.interpret.plot_top_losses': ('interpret.html#plot_top_losses', 'fastai/interpret.py')},
'fastai.layers': { 'fastai.layers.AdaptiveAvgPool': ('layers.html#adaptiveavgpool', 'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool1d': ('layers.html#adaptiveconcatpool1d', 'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool1d.__init__': ( 'layers.html#adaptiveconcatpool1d.__init__',
'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool1d.forward': ( 'layers.html#adaptiveconcatpool1d.forward',
'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool2d': ('layers.html#adaptiveconcatpool2d', 'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool2d.__init__': ( 'layers.html#adaptiveconcatpool2d.__init__',
'fastai/layers.py'),
'fastai.layers.AdaptiveConcatPool2d.forward': ( 'layers.html#adaptiveconcatpool2d.forward',
'fastai/layers.py'),
'fastai.layers.AvgPool': ('layers.html#avgpool', 'fastai/layers.py'),
'fastai.layers.BatchNorm': ('layers.html#batchnorm', 'fastai/layers.py'),
'fastai.layers.BatchNorm1dFlat': ('layers.html#batchnorm1dflat', 'fastai/layers.py'),
'fastai.layers.BatchNorm1dFlat.forward': ('layers.html#batchnorm1dflat.forward', 'fastai/layers.py'),
'fastai.layers.Cat': ('layers.html#cat', 'fastai/layers.py'),
'fastai.layers.Cat.__init__': ('layers.html#cat.__init__', 'fastai/layers.py'),
'fastai.layers.Cat.forward': ('layers.html#cat.forward', 'fastai/layers.py'),
'fastai.layers.ConvLayer': ('layers.html#convlayer', 'fastai/layers.py'),
'fastai.layers.ConvLayer.__init__': ('layers.html#convlayer.__init__', 'fastai/layers.py'),
'fastai.layers.Debugger': ('layers.html#debugger', 'fastai/layers.py'),
'fastai.layers.Embedding': ('layers.html#embedding', 'fastai/layers.py'),
'fastai.layers.Embedding.__init__': ('layers.html#embedding.__init__', 'fastai/layers.py'),
'fastai.layers.Flatten': ('layers.html#flatten', 'fastai/layers.py'),
'fastai.layers.Identity': ('layers.html#identity', 'fastai/layers.py'),
'fastai.layers.InstanceNorm': ('layers.html#instancenorm', 'fastai/layers.py'),
'fastai.layers.Lambda': ('layers.html#lambda', 'fastai/layers.py'),
'fastai.layers.LinBnDrop': ('layers.html#linbndrop', 'fastai/layers.py'),
'fastai.layers.LinBnDrop.__init__': ('layers.html#linbndrop.__init__', 'fastai/layers.py'),
'fastai.layers.MaxPool': ('layers.html#maxpool', 'fastai/layers.py'),
'fastai.layers.MergeLayer': ('layers.html#mergelayer', 'fastai/layers.py'),
'fastai.layers.MergeLayer.__init__': ('layers.html#mergelayer.__init__', 'fastai/layers.py'),
'fastai.layers.MergeLayer.forward': ('layers.html#mergelayer.forward', 'fastai/layers.py'),
'fastai.layers.Mish': ('layers.html#mish', 'fastai/layers.py'),
'fastai.layers.Mish.forward': ('layers.html#mish.forward', 'fastai/layers.py'),
'fastai.layers.MishJitAutoFn': ('layers.html#mishjitautofn', 'fastai/layers.py'),
'fastai.layers.MishJitAutoFn.backward': ('layers.html#mishjitautofn.backward', 'fastai/layers.py'),
'fastai.layers.MishJitAutoFn.forward': ('layers.html#mishjitautofn.forward', 'fastai/layers.py'),
'fastai.layers.NoneReduce': ('layers.html#nonereduce', 'fastai/layers.py'),
'fastai.layers.NoneReduce.__enter__': ('layers.html#nonereduce.__enter__', 'fastai/layers.py'),
'fastai.layers.NoneReduce.__exit__': ('layers.html#nonereduce.__exit__', 'fastai/layers.py'),
'fastai.layers.NoneReduce.__init__': ('layers.html#nonereduce.__init__', 'fastai/layers.py'),
'fastai.layers.ParameterModule': ('layers.html#parametermodule', 'fastai/layers.py'),
'fastai.layers.ParameterModule.__init__': ('layers.html#parametermodule.__init__', 'fastai/layers.py'),
'fastai.layers.ParameterModule.forward': ('layers.html#parametermodule.forward', 'fastai/layers.py'),
'fastai.layers.PartialLambda': ('layers.html#partiallambda', 'fastai/layers.py'),
'fastai.layers.PartialLambda.__init__': ('layers.html#partiallambda.__init__', 'fastai/layers.py'),
'fastai.layers.PartialLambda.__repr__': ('layers.html#partiallambda.__repr__', 'fastai/layers.py'),
'fastai.layers.PartialLambda.forward': ('layers.html#partiallambda.forward', 'fastai/layers.py'),
'fastai.layers.PixelShuffle_ICNR': ('layers.html#pixelshuffle_icnr', 'fastai/layers.py'),
'fastai.layers.PixelShuffle_ICNR.__init__': ('layers.html#pixelshuffle_icnr.__init__', 'fastai/layers.py'),
'fastai.layers.PoolFlatten': ('layers.html#poolflatten', 'fastai/layers.py'),
'fastai.layers.PoolFlatten.__init__': ('layers.html#poolflatten.__init__', 'fastai/layers.py'),
'fastai.layers.PoolType': ('layers.html#pooltype', 'fastai/layers.py'),
'fastai.layers.PooledSelfAttention2d': ('layers.html#pooledselfattention2d', 'fastai/layers.py'),
'fastai.layers.PooledSelfAttention2d.__init__': ( 'layers.html#pooledselfattention2d.__init__',
'fastai/layers.py'),
'fastai.layers.PooledSelfAttention2d._conv': ('layers.html#pooledselfattention2d._conv', 'fastai/layers.py'),
'fastai.layers.PooledSelfAttention2d.forward': ( 'layers.html#pooledselfattention2d.forward',
'fastai/layers.py'),
'fastai.layers.ProdLayer': ('layers.html#prodlayer', 'fastai/layers.py'),
'fastai.layers.ProdLayer.forward': ('layers.html#prodlayer.forward', 'fastai/layers.py'),
'fastai.layers.ResBlock': ('layers.html#resblock', 'fastai/layers.py'),
'fastai.layers.ResBlock.__init__': ('layers.html#resblock.__init__', 'fastai/layers.py'),
'fastai.layers.ResBlock.forward': ('layers.html#resblock.forward', 'fastai/layers.py'),
'fastai.layers.ResizeBatch': ('layers.html#resizebatch', 'fastai/layers.py'),
'fastai.layers.ResizeBatch.__init__': ('layers.html#resizebatch.__init__', 'fastai/layers.py'),
'fastai.layers.ResizeBatch.forward': ('layers.html#resizebatch.forward', 'fastai/layers.py'),
'fastai.layers.SEBlock': ('layers.html#seblock', 'fastai/layers.py'),
'fastai.layers.SEModule': ('layers.html#semodule', 'fastai/layers.py'),
'fastai.layers.SEResNeXtBlock': ('layers.html#seresnextblock', 'fastai/layers.py'),
'fastai.layers.SelfAttention': ('layers.html#selfattention', 'fastai/layers.py'),
'fastai.layers.SelfAttention.__init__': ('layers.html#selfattention.__init__', 'fastai/layers.py'),
'fastai.layers.SelfAttention._conv': ('layers.html#selfattention._conv', 'fastai/layers.py'),
'fastai.layers.SelfAttention.forward': ('layers.html#selfattention.forward', 'fastai/layers.py'),
'fastai.layers.SeparableBlock': ('layers.html#separableblock', 'fastai/layers.py'),
'fastai.layers.SequentialEx': ('layers.html#sequentialex', 'fastai/layers.py'),
'fastai.layers.SequentialEx.__getitem__': ('layers.html#sequentialex.__getitem__', 'fastai/layers.py'),
'fastai.layers.SequentialEx.__init__': ('layers.html#sequentialex.__init__', 'fastai/layers.py'),
'fastai.layers.SequentialEx.append': ('layers.html#sequentialex.append', 'fastai/layers.py'),
'fastai.layers.SequentialEx.extend': ('layers.html#sequentialex.extend', 'fastai/layers.py'),
'fastai.layers.SequentialEx.forward': ('layers.html#sequentialex.forward', 'fastai/layers.py'),
'fastai.layers.SequentialEx.insert': ('layers.html#sequentialex.insert', 'fastai/layers.py'),
'fastai.layers.SigmoidRange': ('layers.html#sigmoidrange', 'fastai/layers.py'),
'fastai.layers.SimpleCNN': ('layers.html#simplecnn', 'fastai/layers.py'),
'fastai.layers.SimpleCNN.__init__': ('layers.html#simplecnn.__init__', 'fastai/layers.py'),
'fastai.layers.SimpleSelfAttention': ('layers.html#simpleselfattention', 'fastai/layers.py'),
'fastai.layers.SimpleSelfAttention.__init__': ( 'layers.html#simpleselfattention.__init__',
'fastai/layers.py'),
'fastai.layers.SimpleSelfAttention.forward': ('layers.html#simpleselfattention.forward', 'fastai/layers.py'),
'fastai.layers.Swish': ('layers.html#swish', 'fastai/layers.py'),
'fastai.layers.Swish.forward': ('layers.html#swish.forward', 'fastai/layers.py'),
'fastai.layers.TimeDistributed': ('layers.html#timedistributed', 'fastai/layers.py'),
'fastai.layers.TimeDistributed.__init__': ('layers.html#timedistributed.__init__', 'fastai/layers.py'),
'fastai.layers.TimeDistributed.__repr__': ('layers.html#timedistributed.__repr__', 'fastai/layers.py'),
'fastai.layers.TimeDistributed.format_output': ( 'layers.html#timedistributed.format_output',
'fastai/layers.py'),
'fastai.layers.TimeDistributed.forward': ('layers.html#timedistributed.forward', 'fastai/layers.py'),
'fastai.layers.TimeDistributed.low_mem_forward': ( 'layers.html#timedistributed.low_mem_forward',
'fastai/layers.py'),
'fastai.layers.ToTensorBase': ('layers.html#totensorbase', 'fastai/layers.py'),
'fastai.layers.View': ('layers.html#view', 'fastai/layers.py'),
'fastai.layers.View.__init__': ('layers.html#view.__init__', 'fastai/layers.py'),
'fastai.layers.View.forward': ('layers.html#view.forward', 'fastai/layers.py'),
'fastai.layers._SwishJitAutoFn': ('layers.html#_swishjitautofn', 'fastai/layers.py'),
'fastai.layers._SwishJitAutoFn.backward': ('layers.html#_swishjitautofn.backward', 'fastai/layers.py'),
'fastai.layers._SwishJitAutoFn.forward': ('layers.html#_swishjitautofn.forward', 'fastai/layers.py'),
'fastai.layers._conv1d_spect': ('layers.html#_conv1d_spect', 'fastai/layers.py'),
'fastai.layers._conv_func': ('layers.html#_conv_func', 'fastai/layers.py'),
'fastai.layers._get_norm': ('layers.html#_get_norm', 'fastai/layers.py'),
'fastai.layers._mish_jit_bwd': ('layers.html#_mish_jit_bwd', 'fastai/layers.py'),
'fastai.layers._mish_jit_fwd': ('layers.html#_mish_jit_fwd', 'fastai/layers.py'),
'fastai.layers._stack_tups': ('layers.html#_stack_tups', 'fastai/layers.py'),
'fastai.layers._swish_jit_bwd': ('layers.html#_swish_jit_bwd', 'fastai/layers.py'),
'fastai.layers._swish_jit_fwd': ('layers.html#_swish_jit_fwd', 'fastai/layers.py'),
'fastai.layers.adaptive_pool': ('layers.html#adaptive_pool', 'fastai/layers.py'),
'fastai.layers.children_and_parameters': ('layers.html#children_and_parameters', 'fastai/layers.py'),
'fastai.layers.flatten_model': ('layers.html#flatten_model', 'fastai/layers.py'),
'fastai.layers.has_children': ('layers.html#has_children', 'fastai/layers.py'),
'fastai.layers.icnr_init': ('layers.html#icnr_init', 'fastai/layers.py'),
'fastai.layers.in_channels': ('layers.html#in_channels', 'fastai/layers.py'),
'fastai.layers.init_default': ('layers.html#init_default', 'fastai/layers.py'),
'fastai.layers.init_linear': ('layers.html#init_linear', 'fastai/layers.py'),
'fastai.layers.mish': ('layers.html#mish', 'fastai/layers.py'),
'fastai.layers.module': ('layers.html#module', 'fastai/layers.py'),
'fastai.layers.sequential': ('layers.html#sequential', 'fastai/layers.py'),
'fastai.layers.sigmoid': ('layers.html#sigmoid', 'fastai/layers.py'),
'fastai.layers.sigmoid_': ('layers.html#sigmoid_', 'fastai/layers.py'),
'fastai.layers.sigmoid_range': ('layers.html#sigmoid_range', 'fastai/layers.py'),
'fastai.layers.swish': ('layers.html#swish', 'fastai/layers.py'),
'fastai.layers.trunc_normal_': ('layers.html#trunc_normal_', 'fastai/layers.py'),
'fastai.layers.vleaky_relu': ('layers.html#vleaky_relu', 'fastai/layers.py')},
'fastai.learner': { 'fastai.learner.AvgLoss': ('learner.html#avgloss', 'fastai/learner.py'),
'fastai.learner.AvgLoss.accumulate': ('learner.html#avgloss.accumulate', 'fastai/learner.py'),
'fastai.learner.AvgLoss.name': ('learner.html#avgloss.name', 'fastai/learner.py'),
'fastai.learner.AvgLoss.reset': ('learner.html#avgloss.reset', 'fastai/learner.py'),
'fastai.learner.AvgLoss.value': ('learner.html#avgloss.value', 'fastai/learner.py'),
'fastai.learner.AvgMetric': ('learner.html#avgmetric', 'fastai/learner.py'),
'fastai.learner.AvgMetric.__init__': ('learner.html#avgmetric.__init__', 'fastai/learner.py'),
'fastai.learner.AvgMetric.accumulate': ('learner.html#avgmetric.accumulate', 'fastai/learner.py'),
'fastai.learner.AvgMetric.name': ('learner.html#avgmetric.name', 'fastai/learner.py'),
'fastai.learner.AvgMetric.reset': ('learner.html#avgmetric.reset', 'fastai/learner.py'),
'fastai.learner.AvgMetric.value': ('learner.html#avgmetric.value', 'fastai/learner.py'),
'fastai.learner.AvgSmoothLoss': ('learner.html#avgsmoothloss', 'fastai/learner.py'),
'fastai.learner.AvgSmoothLoss.__init__': ('learner.html#avgsmoothloss.__init__', 'fastai/learner.py'),
'fastai.learner.AvgSmoothLoss.accumulate': ('learner.html#avgsmoothloss.accumulate', 'fastai/learner.py'),
'fastai.learner.AvgSmoothLoss.reset': ('learner.html#avgsmoothloss.reset', 'fastai/learner.py'),
'fastai.learner.AvgSmoothLoss.value': ('learner.html#avgsmoothloss.value', 'fastai/learner.py'),
'fastai.learner.CastToTensor': ('learner.html#casttotensor', 'fastai/learner.py'),
'fastai.learner.CastToTensor.before_batch': ('learner.html#casttotensor.before_batch', 'fastai/learner.py'),
'fastai.learner.Learner': ('learner.html#learner', 'fastai/learner.py'),
'fastai.learner.Learner.__call__': ('learner.html#learner.__call__', 'fastai/learner.py'),
'fastai.learner.Learner.__enter__': ('learner.html#learner.__enter__', 'fastai/learner.py'),
'fastai.learner.Learner.__exit__': ('learner.html#learner.__exit__', 'fastai/learner.py'),
'fastai.learner.Learner.__getstate__': ('learner.html#learner.__getstate__', 'fastai/learner.py'),
'fastai.learner.Learner.__init__': ('learner.html#learner.__init__', 'fastai/learner.py'),
'fastai.learner.Learner.__setstate__': ('learner.html#learner.__setstate__', 'fastai/learner.py'),
'fastai.learner.Learner._backward': ('learner.html#learner._backward', 'fastai/learner.py'),
'fastai.learner.Learner._bn_bias_state': ('learner.html#learner._bn_bias_state', 'fastai/learner.py'),
'fastai.learner.Learner._call_one': ('learner.html#learner._call_one', 'fastai/learner.py'),
'fastai.learner.Learner._do_epoch': ('learner.html#learner._do_epoch', 'fastai/learner.py'),
'fastai.learner.Learner._do_epoch_train': ('learner.html#learner._do_epoch_train', 'fastai/learner.py'),
'fastai.learner.Learner._do_epoch_validate': ( 'learner.html#learner._do_epoch_validate',
'fastai/learner.py'),
'fastai.learner.Learner._do_fit': ('learner.html#learner._do_fit', 'fastai/learner.py'),
'fastai.learner.Learner._do_grad_opt': ('learner.html#learner._do_grad_opt', 'fastai/learner.py'),
'fastai.learner.Learner._do_one_batch': ('learner.html#learner._do_one_batch', 'fastai/learner.py'),
'fastai.learner.Learner._end_cleanup': ('learner.html#learner._end_cleanup', 'fastai/learner.py'),
'fastai.learner.Learner._grab_cbs': ('learner.html#learner._grab_cbs', 'fastai/learner.py'),
'fastai.learner.Learner._set_device': ('learner.html#learner._set_device', 'fastai/learner.py'),
'fastai.learner.Learner._split': ('learner.html#learner._split', 'fastai/learner.py'),
'fastai.learner.Learner._step': ('learner.html#learner._step', 'fastai/learner.py'),
'fastai.learner.Learner._with_events': ('learner.html#learner._with_events', 'fastai/learner.py'),
'fastai.learner.Learner.add_cb': ('learner.html#learner.add_cb', 'fastai/learner.py'),
'fastai.learner.Learner.add_cbs': ('learner.html#learner.add_cbs', 'fastai/learner.py'),
'fastai.learner.Learner.added_cbs': ('learner.html#learner.added_cbs', 'fastai/learner.py'),
'fastai.learner.Learner.all_batches': ('learner.html#learner.all_batches', 'fastai/learner.py'),
'fastai.learner.Learner.create_opt': ('learner.html#learner.create_opt', 'fastai/learner.py'),
'fastai.learner.Learner.export': ('learner.html#learner.export', 'fastai/learner.py'),
'fastai.learner.Learner.fit': ('learner.html#learner.fit', 'fastai/learner.py'),
'fastai.learner.Learner.freeze': ('learner.html#learner.freeze', 'fastai/learner.py'),
'fastai.learner.Learner.freeze_to': ('learner.html#learner.freeze_to', 'fastai/learner.py'),
'fastai.learner.Learner.get_preds': ('learner.html#learner.get_preds', 'fastai/learner.py'),
'fastai.learner.Learner.load': ('learner.html#learner.load', 'fastai/learner.py'),
'fastai.learner.Learner.loss_not_reduced': ('learner.html#learner.loss_not_reduced', 'fastai/learner.py'),
'fastai.learner.Learner.metrics': ('learner.html#learner.metrics', 'fastai/learner.py'),
'fastai.learner.Learner.no_logging': ('learner.html#learner.no_logging', 'fastai/learner.py'),
'fastai.learner.Learner.no_mbar': ('learner.html#learner.no_mbar', 'fastai/learner.py'),
'fastai.learner.Learner.one_batch': ('learner.html#learner.one_batch', 'fastai/learner.py'),
'fastai.learner.Learner.ordered_cbs': ('learner.html#learner.ordered_cbs', 'fastai/learner.py'),
'fastai.learner.Learner.predict': ('learner.html#learner.predict', 'fastai/learner.py'),
'fastai.learner.Learner.remove_cb': ('learner.html#learner.remove_cb', 'fastai/learner.py'),
'fastai.learner.Learner.remove_cbs': ('learner.html#learner.remove_cbs', 'fastai/learner.py'),
'fastai.learner.Learner.removed_cbs': ('learner.html#learner.removed_cbs', 'fastai/learner.py'),
'fastai.learner.Learner.save': ('learner.html#learner.save', 'fastai/learner.py'),
'fastai.learner.Learner.show_results': ('learner.html#learner.show_results', 'fastai/learner.py'),
'fastai.learner.Learner.show_training_loop': ( 'learner.html#learner.show_training_loop',
'fastai/learner.py'),
'fastai.learner.Learner.to_detach': ('learner.html#learner.to_detach', 'fastai/learner.py'),
'fastai.learner.Learner.tta': ('learner.html#learner.tta', 'fastai/learner.py'),
'fastai.learner.Learner.unfreeze': ('learner.html#learner.unfreeze', 'fastai/learner.py'),
'fastai.learner.Learner.validate': ('learner.html#learner.validate', 'fastai/learner.py'),
'fastai.learner.Learner.validation_context': ( 'learner.html#learner.validation_context',
'fastai/learner.py'),
'fastai.learner.Metric': ('learner.html#metric', 'fastai/learner.py'),
'fastai.learner.Metric.accumulate': ('learner.html#metric.accumulate', 'fastai/learner.py'),
'fastai.learner.Metric.name': ('learner.html#metric.name', 'fastai/learner.py'),
'fastai.learner.Metric.reset': ('learner.html#metric.reset', 'fastai/learner.py'),
'fastai.learner.Metric.value': ('learner.html#metric.value', 'fastai/learner.py'),
'fastai.learner.Recorder': ('learner.html#recorder', 'fastai/learner.py'),
'fastai.learner.Recorder.__init__': ('learner.html#recorder.__init__', 'fastai/learner.py'),
'fastai.learner.Recorder._train_mets': ('learner.html#recorder._train_mets', 'fastai/learner.py'),
'fastai.learner.Recorder._valid_mets': ('learner.html#recorder._valid_mets', 'fastai/learner.py'),
'fastai.learner.Recorder.after_batch': ('learner.html#recorder.after_batch', 'fastai/learner.py'),
'fastai.learner.Recorder.after_cancel_train': ( 'learner.html#recorder.after_cancel_train',
'fastai/learner.py'),
'fastai.learner.Recorder.after_cancel_validate': ( 'learner.html#recorder.after_cancel_validate',
'fastai/learner.py'),
'fastai.learner.Recorder.after_epoch': ('learner.html#recorder.after_epoch', 'fastai/learner.py'),
'fastai.learner.Recorder.after_train': ('learner.html#recorder.after_train', 'fastai/learner.py'),
'fastai.learner.Recorder.after_validate': ('learner.html#recorder.after_validate', 'fastai/learner.py'),
'fastai.learner.Recorder.before_epoch': ('learner.html#recorder.before_epoch', 'fastai/learner.py'),
'fastai.learner.Recorder.before_fit': ('learner.html#recorder.before_fit', 'fastai/learner.py'),
'fastai.learner.Recorder.before_train': ('learner.html#recorder.before_train', 'fastai/learner.py'),
'fastai.learner.Recorder.before_validate': ('learner.html#recorder.before_validate', 'fastai/learner.py'),
'fastai.learner.Recorder.plot_loss': ('learner.html#recorder.plot_loss', 'fastai/learner.py'),
'fastai.learner.SkipToEpoch': ('learner.html#skiptoepoch', 'fastai/learner.py'),
'fastai.learner.SkipToEpoch.__init__': ('learner.html#skiptoepoch.__init__', 'fastai/learner.py'),
'fastai.learner.SkipToEpoch.before_epoch': ('learner.html#skiptoepoch.before_epoch', 'fastai/learner.py'),
'fastai.learner.ValueMetric': ('learner.html#valuemetric', 'fastai/learner.py'),
'fastai.learner.ValueMetric.__init__': ('learner.html#valuemetric.__init__', 'fastai/learner.py'),
'fastai.learner.ValueMetric.name': ('learner.html#valuemetric.name', 'fastai/learner.py'),
'fastai.learner.ValueMetric.value': ('learner.html#valuemetric.value', 'fastai/learner.py'),
'fastai.learner._ConstantFunc': ('learner.html#_constantfunc', 'fastai/learner.py'),
'fastai.learner._ConstantFunc.__call__': ('learner.html#_constantfunc.__call__', 'fastai/learner.py'),
'fastai.learner._ConstantFunc.__init__': ('learner.html#_constantfunc.__init__', 'fastai/learner.py'),
'fastai.learner._before_batch_cb': ('learner.html#_before_batch_cb', 'fastai/learner.py'),
'fastai.learner._cast_tensor': ('learner.html#_cast_tensor', 'fastai/learner.py'),
'fastai.learner._maybe_item': ('learner.html#_maybe_item', 'fastai/learner.py'),
'fastai.learner._try_concat': ('learner.html#_try_concat', 'fastai/learner.py'),
'fastai.learner.before_batch_cb': ('learner.html#before_batch_cb', 'fastai/learner.py'),
'fastai.learner.load_learner': ('learner.html#load_learner', 'fastai/learner.py'),
'fastai.learner.load_model': ('learner.html#load_model', 'fastai/learner.py'),
'fastai.learner.mk_metric': ('learner.html#mk_metric', 'fastai/learner.py'),
'fastai.learner.replacing_yield': ('learner.html#replacing_yield', 'fastai/learner.py'),
'fastai.learner.save_model': ('learner.html#save_model', 'fastai/learner.py')},
'fastai.losses': { 'fastai.losses.BCELossFlat': ('losses.html#bcelossflat', 'fastai/losses.py'),
'fastai.losses.BCEWithLogitsLossFlat': ('losses.html#bcewithlogitslossflat', 'fastai/losses.py'),
'fastai.losses.BCEWithLogitsLossFlat.__init__': ( 'losses.html#bcewithlogitslossflat.__init__',
'fastai/losses.py'),
'fastai.losses.BCEWithLogitsLossFlat.activation': ( 'losses.html#bcewithlogitslossflat.activation',
'fastai/losses.py'),
'fastai.losses.BCEWithLogitsLossFlat.decodes': ( 'losses.html#bcewithlogitslossflat.decodes',
'fastai/losses.py'),
'fastai.losses.BaseLoss': ('losses.html#baseloss', 'fastai/losses.py'),
'fastai.losses.BaseLoss.__call__': ('losses.html#baseloss.__call__', 'fastai/losses.py'),
'fastai.losses.BaseLoss.__init__': ('losses.html#baseloss.__init__', 'fastai/losses.py'),
'fastai.losses.BaseLoss.__repr__': ('losses.html#baseloss.__repr__', 'fastai/losses.py'),
'fastai.losses.BaseLoss._contiguous': ('losses.html#baseloss._contiguous', 'fastai/losses.py'),
'fastai.losses.BaseLoss.reduction': ('losses.html#baseloss.reduction', 'fastai/losses.py'),
'fastai.losses.BaseLoss.to': ('losses.html#baseloss.to', 'fastai/losses.py'),
'fastai.losses.CrossEntropyLossFlat': ('losses.html#crossentropylossflat', 'fastai/losses.py'),
'fastai.losses.CrossEntropyLossFlat.__init__': ( 'losses.html#crossentropylossflat.__init__',
'fastai/losses.py'),
'fastai.losses.CrossEntropyLossFlat.activation': ( 'losses.html#crossentropylossflat.activation',
'fastai/losses.py'),
'fastai.losses.CrossEntropyLossFlat.decodes': ( 'losses.html#crossentropylossflat.decodes',
'fastai/losses.py'),
'fastai.losses.DiceLoss': ('losses.html#diceloss', 'fastai/losses.py'),
'fastai.losses.DiceLoss.__call__': ('losses.html#diceloss.__call__', 'fastai/losses.py'),
'fastai.losses.DiceLoss.__init__': ('losses.html#diceloss.__init__', 'fastai/losses.py'),
'fastai.losses.DiceLoss._one_hot': ('losses.html#diceloss._one_hot', 'fastai/losses.py'),
'fastai.losses.DiceLoss.activation': ('losses.html#diceloss.activation', 'fastai/losses.py'),
'fastai.losses.DiceLoss.decodes': ('losses.html#diceloss.decodes', 'fastai/losses.py'),
'fastai.losses.FocalLoss': ('losses.html#focalloss', 'fastai/losses.py'),
'fastai.losses.FocalLoss.__init__': ('losses.html#focalloss.__init__', 'fastai/losses.py'),
'fastai.losses.FocalLoss.forward': ('losses.html#focalloss.forward', 'fastai/losses.py'),
'fastai.losses.FocalLossFlat': ('losses.html#focallossflat', 'fastai/losses.py'),
'fastai.losses.FocalLossFlat.__init__': ('losses.html#focallossflat.__init__', 'fastai/losses.py'),
'fastai.losses.FocalLossFlat.activation': ('losses.html#focallossflat.activation', 'fastai/losses.py'),
'fastai.losses.FocalLossFlat.decodes': ('losses.html#focallossflat.decodes', 'fastai/losses.py'),
'fastai.losses.L1LossFlat': ('losses.html#l1lossflat', 'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropy': ('losses.html#labelsmoothingcrossentropy', 'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropy.__init__': ( 'losses.html#labelsmoothingcrossentropy.__init__',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropy.activation': ( 'losses.html#labelsmoothingcrossentropy.activation',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropy.decodes': ( 'losses.html#labelsmoothingcrossentropy.decodes',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropy.forward': ( 'losses.html#labelsmoothingcrossentropy.forward',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropyFlat': ( 'losses.html#labelsmoothingcrossentropyflat',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropyFlat.__init__': ( 'losses.html#labelsmoothingcrossentropyflat.__init__',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropyFlat.activation': ( 'losses.html#labelsmoothingcrossentropyflat.activation',
'fastai/losses.py'),
'fastai.losses.LabelSmoothingCrossEntropyFlat.decodes': ( 'losses.html#labelsmoothingcrossentropyflat.decodes',
'fastai/losses.py'),
'fastai.losses.MSELossFlat': ('losses.html#mselossflat', 'fastai/losses.py')},
'fastai.medical.imaging': { 'fastai.medical.imaging.DcmDataset.as_dict': ( 'medical.imaging.html#dcmdataset.as_dict',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.hist_scaled': ( 'medical.imaging.html#dcmdataset.hist_scaled',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.mask_from_blur': ( 'medical.imaging.html#dcmdataset.mask_from_blur',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.pct_in_window': ( 'medical.imaging.html#dcmdataset.pct_in_window',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.pixels': ( 'medical.imaging.html#dcmdataset.pixels',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.save_jpg': ( 'medical.imaging.html#dcmdataset.save_jpg',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.save_tif16': ( 'medical.imaging.html#dcmdataset.save_tif16',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.scaled_px': ( 'medical.imaging.html#dcmdataset.scaled_px',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.set_pixels': ( 'medical.imaging.html#dcmdataset.set_pixels',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.shape': ( 'medical.imaging.html#dcmdataset.shape',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.show': ( 'medical.imaging.html#dcmdataset.show',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.to_3chan': ( 'medical.imaging.html#dcmdataset.to_3chan',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.to_nchan': ( 'medical.imaging.html#dcmdataset.to_nchan',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.to_uint16': ( 'medical.imaging.html#dcmdataset.to_uint16',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.windowed': ( 'medical.imaging.html#dcmdataset.windowed',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.zoom': ( 'medical.imaging.html#dcmdataset.zoom',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DcmDataset.zoom_to': ( 'medical.imaging.html#dcmdataset.zoom_to',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DicomSegmentationDataLoaders': ( 'medical.imaging.html#dicomsegmentationdataloaders',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.DicomSegmentationDataLoaders.from_label_func': ( 'medical.imaging.html#dicomsegmentationdataloaders.from_label_func',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.PILCTScan': ('medical.imaging.html#pilctscan', 'fastai/medical/imaging.py'),
'fastai.medical.imaging.PILDicom': ('medical.imaging.html#pildicom', 'fastai/medical/imaging.py'),
'fastai.medical.imaging.PILDicom.create': ( 'medical.imaging.html#pildicom.create',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Path.dcmread': ( 'medical.imaging.html#path.dcmread',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Path.png16read': ( 'medical.imaging.html#path.png16read',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.freqhist_bins': ( 'medical.imaging.html#tensor.freqhist_bins',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.hist_scaled': ( 'medical.imaging.html#tensor.hist_scaled',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.hist_scaled_pt': ( 'medical.imaging.html#tensor.hist_scaled_pt',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.mask_from_blur': ( 'medical.imaging.html#tensor.mask_from_blur',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.save_jpg': ( 'medical.imaging.html#tensor.save_jpg',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.save_tif16': ( 'medical.imaging.html#tensor.save_tif16',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.to_3chan': ( 'medical.imaging.html#tensor.to_3chan',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.to_nchan': ( 'medical.imaging.html#tensor.to_nchan',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.to_uint16': ( 'medical.imaging.html#tensor.to_uint16',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.Tensor.windowed': ( 'medical.imaging.html#tensor.windowed',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.TensorCTScan': ( 'medical.imaging.html#tensorctscan',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.TensorDicom': ( 'medical.imaging.html#tensordicom',
'fastai/medical/imaging.py'),
'fastai.medical.imaging._bbs2sizes': ( 'medical.imaging.html#_bbs2sizes',
'fastai/medical/imaging.py'),
'fastai.medical.imaging._cast_dicom_special': ( 'medical.imaging.html#_cast_dicom_special',
'fastai/medical/imaging.py'),
'fastai.medical.imaging._dcm2dict': ('medical.imaging.html#_dcm2dict', 'fastai/medical/imaging.py'),
'fastai.medical.imaging._from_dicoms': ( 'medical.imaging.html#_from_dicoms',
'fastai/medical/imaging.py'),
'fastai.medical.imaging._px_bounds': ( 'medical.imaging.html#_px_bounds',
'fastai/medical/imaging.py'),
'fastai.medical.imaging._split_elem': ( 'medical.imaging.html#_split_elem',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.array_freqhist_bins': ( 'medical.imaging.html#array_freqhist_bins',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.crop_resize': ( 'medical.imaging.html#crop_resize',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.gauss_blur2d': ( 'medical.imaging.html#gauss_blur2d',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.get_dicom_files': ( 'medical.imaging.html#get_dicom_files',
'fastai/medical/imaging.py'),
'fastai.medical.imaging.mask2bbox': ('medical.imaging.html#mask2bbox', 'fastai/medical/imaging.py'),
'fastai.medical.imaging.uniform_blur2d': ( 'medical.imaging.html#uniform_blur2d',
'fastai/medical/imaging.py')},
'fastai.medical.text': {},
'fastai.metrics': { 'fastai.metrics.APScoreBinary': ('metrics.html#apscorebinary', 'fastai/metrics.py'),
'fastai.metrics.APScoreMulti': ('metrics.html#apscoremulti', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric': ('metrics.html#accummetric', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.__call__': ('metrics.html#accummetric.__call__', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.__init__': ('metrics.html#accummetric.__init__', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.accum_values': ('metrics.html#accummetric.accum_values', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.accumulate': ('metrics.html#accummetric.accumulate', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.name': ('metrics.html#accummetric.name', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.reset': ('metrics.html#accummetric.reset', 'fastai/metrics.py'),
'fastai.metrics.AccumMetric.value': ('metrics.html#accummetric.value', 'fastai/metrics.py'),
'fastai.metrics.BalancedAccuracy': ('metrics.html#balancedaccuracy', 'fastai/metrics.py'),
'fastai.metrics.BrierScore': ('metrics.html#brierscore', 'fastai/metrics.py'),
'fastai.metrics.BrierScoreMulti': ('metrics.html#brierscoremulti', 'fastai/metrics.py'),
'fastai.metrics.CohenKappa': ('metrics.html#cohenkappa', 'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric': ('metrics.html#corpusbleumetric', 'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.NGram': ('metrics.html#corpusbleumetric.ngram', 'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.__init__': ('metrics.html#corpusbleumetric.__init__', 'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.accumulate': ( 'metrics.html#corpusbleumetric.accumulate',
'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.get_correct_ngrams': ( 'metrics.html#corpusbleumetric.get_correct_ngrams',
'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.get_grams': ( 'metrics.html#corpusbleumetric.get_grams',
'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.reset': ('metrics.html#corpusbleumetric.reset', 'fastai/metrics.py'),
'fastai.metrics.CorpusBLEUMetric.value': ('metrics.html#corpusbleumetric.value', 'fastai/metrics.py'),
'fastai.metrics.Dice': ('metrics.html#dice', 'fastai/metrics.py'),
'fastai.metrics.Dice.__init__': ('metrics.html#dice.__init__', 'fastai/metrics.py'),
'fastai.metrics.Dice.accumulate': ('metrics.html#dice.accumulate', 'fastai/metrics.py'),
'fastai.metrics.Dice.reset': ('metrics.html#dice.reset', 'fastai/metrics.py'),
'fastai.metrics.Dice.value': ('metrics.html#dice.value', 'fastai/metrics.py'),
'fastai.metrics.DiceMulti': ('metrics.html#dicemulti', 'fastai/metrics.py'),
'fastai.metrics.DiceMulti.__init__': ('metrics.html#dicemulti.__init__', 'fastai/metrics.py'),
'fastai.metrics.DiceMulti.accumulate': ('metrics.html#dicemulti.accumulate', 'fastai/metrics.py'),
'fastai.metrics.DiceMulti.reset': ('metrics.html#dicemulti.reset', 'fastai/metrics.py'),
'fastai.metrics.DiceMulti.value': ('metrics.html#dicemulti.value', 'fastai/metrics.py'),
'fastai.metrics.ExplainedVariance': ('metrics.html#explainedvariance', 'fastai/metrics.py'),
'fastai.metrics.F1Score': ('metrics.html#f1score', 'fastai/metrics.py'),
'fastai.metrics.F1ScoreMulti': ('metrics.html#f1scoremulti', 'fastai/metrics.py'),
'fastai.metrics.FBeta': ('metrics.html#fbeta', 'fastai/metrics.py'),
'fastai.metrics.FBetaMulti': ('metrics.html#fbetamulti', 'fastai/metrics.py'),
'fastai.metrics.HammingLoss': ('metrics.html#hammingloss', 'fastai/metrics.py'),
'fastai.metrics.HammingLossMulti': ('metrics.html#hamminglossmulti', 'fastai/metrics.py'),
'fastai.metrics.Jaccard': ('metrics.html#jaccard', 'fastai/metrics.py'),
'fastai.metrics.JaccardCoeff': ('metrics.html#jaccardcoeff', 'fastai/metrics.py'),
'fastai.metrics.JaccardCoeff.value': ('metrics.html#jaccardcoeff.value', 'fastai/metrics.py'),
'fastai.metrics.JaccardMulti': ('metrics.html#jaccardmulti', 'fastai/metrics.py'),
'fastai.metrics.LossMetric': ('metrics.html#lossmetric', 'fastai/metrics.py'),
'fastai.metrics.LossMetric.__init__': ('metrics.html#lossmetric.__init__', 'fastai/metrics.py'),
'fastai.metrics.LossMetric.accumulate': ('metrics.html#lossmetric.accumulate', 'fastai/metrics.py'),
'fastai.metrics.LossMetric.name': ('metrics.html#lossmetric.name', 'fastai/metrics.py'),
'fastai.metrics.LossMetrics': ('metrics.html#lossmetrics', 'fastai/metrics.py'),
'fastai.metrics.MatthewsCorrCoef': ('metrics.html#matthewscorrcoef', 'fastai/metrics.py'),
'fastai.metrics.MatthewsCorrCoefMulti': ('metrics.html#matthewscorrcoefmulti', 'fastai/metrics.py'),
'fastai.metrics.PearsonCorrCoef': ('metrics.html#pearsoncorrcoef', 'fastai/metrics.py'),
'fastai.metrics.Perplexity': ('metrics.html#perplexity', 'fastai/metrics.py'),
'fastai.metrics.Perplexity.name': ('metrics.html#perplexity.name', 'fastai/metrics.py'),
'fastai.metrics.Perplexity.value': ('metrics.html#perplexity.value', 'fastai/metrics.py'),
'fastai.metrics.Precision': ('metrics.html#precision', 'fastai/metrics.py'),
'fastai.metrics.PrecisionMulti': ('metrics.html#precisionmulti', 'fastai/metrics.py'),
'fastai.metrics.R2Score': ('metrics.html#r2score', 'fastai/metrics.py'),
'fastai.metrics.Recall': ('metrics.html#recall', 'fastai/metrics.py'),
'fastai.metrics.RecallMulti': ('metrics.html#recallmulti', 'fastai/metrics.py'),
'fastai.metrics.RocAuc': ('metrics.html#rocauc', 'fastai/metrics.py'),
'fastai.metrics.RocAucBinary': ('metrics.html#rocaucbinary', 'fastai/metrics.py'),
'fastai.metrics.RocAucMulti': ('metrics.html#rocaucmulti', 'fastai/metrics.py'),
'fastai.metrics.SpearmanCorrCoef': ('metrics.html#spearmancorrcoef', 'fastai/metrics.py'),
'fastai.metrics._exp_rmspe': ('metrics.html#_exp_rmspe', 'fastai/metrics.py'),
'fastai.metrics._rmse': ('metrics.html#_rmse', 'fastai/metrics.py'),
'fastai.metrics.accuracy': ('metrics.html#accuracy', 'fastai/metrics.py'),
'fastai.metrics.accuracy_multi': ('metrics.html#accuracy_multi', 'fastai/metrics.py'),
'fastai.metrics.error_rate': ('metrics.html#error_rate', 'fastai/metrics.py'),
'fastai.metrics.foreground_acc': ('metrics.html#foreground_acc', 'fastai/metrics.py'),
'fastai.metrics.mae': ('metrics.html#mae', 'fastai/metrics.py'),
'fastai.metrics.mse': ('metrics.html#mse', 'fastai/metrics.py'),
'fastai.metrics.msle': ('metrics.html#msle', 'fastai/metrics.py'),
'fastai.metrics.optim_metric': ('metrics.html#optim_metric', 'fastai/metrics.py'),
'fastai.metrics.skm_to_fastai': ('metrics.html#skm_to_fastai', 'fastai/metrics.py'),
'fastai.metrics.top_k_accuracy': ('metrics.html#top_k_accuracy', 'fastai/metrics.py')},
'fastai.optimizer': { 'fastai.optimizer.Adam': ('optimizer.html#adam', 'fastai/optimizer.py'),
'fastai.optimizer.Lamb': ('optimizer.html#lamb', 'fastai/optimizer.py'),
'fastai.optimizer.Larc': ('optimizer.html#larc', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead': ('optimizer.html#lookahead', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.__init__': ('optimizer.html#lookahead.__init__', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead._copy_weights': ( 'optimizer.html#lookahead._copy_weights',
'fastai/optimizer.py'),
'fastai.optimizer.Lookahead._init_state': ('optimizer.html#lookahead._init_state', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.clear_state': ('optimizer.html#lookahead.clear_state', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.load_state_dict': ( 'optimizer.html#lookahead.load_state_dict',
'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.param_lists': ('optimizer.html#lookahead.param_lists', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.state_dict': ('optimizer.html#lookahead.state_dict', 'fastai/optimizer.py'),
'fastai.optimizer.Lookahead.step': ('optimizer.html#lookahead.step', 'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper': ('optimizer.html#optimwrapper', 'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper.__init__': ('optimizer.html#optimwrapper.__init__', 'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper._set_hyper': ( 'optimizer.html#optimwrapper._set_hyper',
'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper.clear_state': ( 'optimizer.html#optimwrapper.clear_state',
'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper.hypers': ('optimizer.html#optimwrapper.hypers', 'fastai/optimizer.py'),
'fastai.optimizer.OptimWrapper.param_lists': ( 'optimizer.html#optimwrapper.param_lists',
'fastai/optimizer.py'),
'fastai.optimizer.Optimizer': ('optimizer.html#optimizer', 'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.__init__': ('optimizer.html#optimizer.__init__', 'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.clear_state': ('optimizer.html#optimizer.clear_state', 'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.load_state_dict': ( 'optimizer.html#optimizer.load_state_dict',
'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.state_dict': ('optimizer.html#optimizer.state_dict', 'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.step': ('optimizer.html#optimizer.step', 'fastai/optimizer.py'),
'fastai.optimizer.Optimizer.zero_grad': ('optimizer.html#optimizer.zero_grad', 'fastai/optimizer.py'),
'fastai.optimizer.QHAdam': ('optimizer.html#qhadam', 'fastai/optimizer.py'),
'fastai.optimizer.RAdam': ('optimizer.html#radam', 'fastai/optimizer.py'),
'fastai.optimizer.RMSProp': ('optimizer.html#rmsprop', 'fastai/optimizer.py'),
'fastai.optimizer.SGD': ('optimizer.html#sgd', 'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer': ('optimizer.html#_baseoptimizer', 'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer._set_hyper': ( 'optimizer.html#_baseoptimizer._set_hyper',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer._set_require_grad': ( 'optimizer.html#_baseoptimizer._set_require_grad',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.all_params': ( 'optimizer.html#_baseoptimizer.all_params',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.freeze': ('optimizer.html#_baseoptimizer.freeze', 'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.freeze_to': ( 'optimizer.html#_baseoptimizer.freeze_to',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.param_groups': ( 'optimizer.html#_baseoptimizer.param_groups',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.set_hyper': ( 'optimizer.html#_baseoptimizer.set_hyper',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.set_hypers': ( 'optimizer.html#_baseoptimizer.set_hypers',
'fastai/optimizer.py'),
'fastai.optimizer._BaseOptimizer.unfreeze': ( 'optimizer.html#_baseoptimizer.unfreeze',
'fastai/optimizer.py'),
'fastai.optimizer._convert_params': ('optimizer.html#_convert_params', 'fastai/optimizer.py'),
'fastai.optimizer._update': ('optimizer.html#_update', 'fastai/optimizer.py'),
'fastai.optimizer.adam_step': ('optimizer.html#adam_step', 'fastai/optimizer.py'),
'fastai.optimizer.average_grad': ('optimizer.html#average_grad', 'fastai/optimizer.py'),
'fastai.optimizer.average_sqr_grad': ('optimizer.html#average_sqr_grad', 'fastai/optimizer.py'),
'fastai.optimizer.debias': ('optimizer.html#debias', 'fastai/optimizer.py'),
'fastai.optimizer.detuplify_pg': ('optimizer.html#detuplify_pg', 'fastai/optimizer.py'),
'fastai.optimizer.l2_reg': ('optimizer.html#l2_reg', 'fastai/optimizer.py'),
'fastai.optimizer.lamb_step': ('optimizer.html#lamb_step', 'fastai/optimizer.py'),
'fastai.optimizer.larc_layer_lr': ('optimizer.html#larc_layer_lr', 'fastai/optimizer.py'),
'fastai.optimizer.larc_step': ('optimizer.html#larc_step', 'fastai/optimizer.py'),
'fastai.optimizer.momentum_step': ('optimizer.html#momentum_step', 'fastai/optimizer.py'),
'fastai.optimizer.qhadam_step': ('optimizer.html#qhadam_step', 'fastai/optimizer.py'),
'fastai.optimizer.radam_step': ('optimizer.html#radam_step', 'fastai/optimizer.py'),
'fastai.optimizer.ranger': ('optimizer.html#ranger', 'fastai/optimizer.py'),
'fastai.optimizer.rms_prop_step': ('optimizer.html#rms_prop_step', 'fastai/optimizer.py'),
'fastai.optimizer.set_item_pg': ('optimizer.html#set_item_pg', 'fastai/optimizer.py'),
'fastai.optimizer.sgd_step': ('optimizer.html#sgd_step', 'fastai/optimizer.py'),
'fastai.optimizer.step_stat': ('optimizer.html#step_stat', 'fastai/optimizer.py'),
'fastai.optimizer.weight_decay': ('optimizer.html#weight_decay', 'fastai/optimizer.py')},
'fastai.tabular.all': {},
'fastai.tabular.core': { 'fastai.tabular.core.Categorify': ('tabular.core.html#categorify', 'fastai/tabular/core.py'),
'fastai.tabular.core.Categorify.__getitem__': ( 'tabular.core.html#categorify.__getitem__',
'fastai/tabular/core.py'),
'fastai.tabular.core.Categorify.decodes': ( 'tabular.core.html#categorify.decodes',
'fastai/tabular/core.py'),
'fastai.tabular.core.Categorify.encodes': ( 'tabular.core.html#categorify.encodes',
'fastai/tabular/core.py'),
'fastai.tabular.core.Categorify.setups': ( 'tabular.core.html#categorify.setups',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillMissing': ('tabular.core.html#fillmissing', 'fastai/tabular/core.py'),
'fastai.tabular.core.FillMissing.__init__': ( 'tabular.core.html#fillmissing.__init__',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillMissing.encodes': ( 'tabular.core.html#fillmissing.encodes',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillMissing.setups': ( 'tabular.core.html#fillmissing.setups',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillStrategy': ('tabular.core.html#fillstrategy', 'fastai/tabular/core.py'),
'fastai.tabular.core.FillStrategy.constant': ( 'tabular.core.html#fillstrategy.constant',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillStrategy.median': ( 'tabular.core.html#fillstrategy.median',
'fastai/tabular/core.py'),
'fastai.tabular.core.FillStrategy.mode': ( 'tabular.core.html#fillstrategy.mode',
'fastai/tabular/core.py'),
'fastai.tabular.core.ReadTabBatch': ('tabular.core.html#readtabbatch', 'fastai/tabular/core.py'),
'fastai.tabular.core.ReadTabBatch.__init__': ( 'tabular.core.html#readtabbatch.__init__',
'fastai/tabular/core.py'),
'fastai.tabular.core.ReadTabBatch.decodes': ( 'tabular.core.html#readtabbatch.decodes',
'fastai/tabular/core.py'),
'fastai.tabular.core.ReadTabBatch.encodes': ( 'tabular.core.html#readtabbatch.encodes',
'fastai/tabular/core.py'),
'fastai.tabular.core.TabDataLoader': ('tabular.core.html#tabdataloader', 'fastai/tabular/core.py'),
'fastai.tabular.core.TabDataLoader.__init__': ( 'tabular.core.html#tabdataloader.__init__',
'fastai/tabular/core.py'),
'fastai.tabular.core.TabDataLoader.create_batch': ( 'tabular.core.html#tabdataloader.create_batch',
'fastai/tabular/core.py'),
'fastai.tabular.core.TabDataLoader.do_item': ( 'tabular.core.html#tabdataloader.do_item',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular': ('tabular.core.html#tabular', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.__init__': ( 'tabular.core.html#tabular.__init__',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.all_col_names': ( 'tabular.core.html#tabular.all_col_names',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.copy': ('tabular.core.html#tabular.copy', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.decode': ('tabular.core.html#tabular.decode', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.decode_row': ( 'tabular.core.html#tabular.decode_row',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.iloc': ('tabular.core.html#tabular.iloc', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.loc': ('tabular.core.html#tabular.loc', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.n_subsets': ( 'tabular.core.html#tabular.n_subsets',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.new': ('tabular.core.html#tabular.new', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.new_empty': ( 'tabular.core.html#tabular.new_empty',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.process': ('tabular.core.html#tabular.process', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.setup': ('tabular.core.html#tabular.setup', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.show': ('tabular.core.html#tabular.show', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.subset': ('tabular.core.html#tabular.subset', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.targ': ('tabular.core.html#tabular.targ', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.to_device': ( 'tabular.core.html#tabular.to_device',
'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.x_names': ('tabular.core.html#tabular.x_names', 'fastai/tabular/core.py'),
'fastai.tabular.core.Tabular.y': ('tabular.core.html#tabular.y', 'fastai/tabular/core.py'),
'fastai.tabular.core.TabularPandas': ('tabular.core.html#tabularpandas', 'fastai/tabular/core.py'),
'fastai.tabular.core.TabularPandas.transform': ( 'tabular.core.html#tabularpandas.transform',
'fastai/tabular/core.py'),
'fastai.tabular.core.TabularProc': ('tabular.core.html#tabularproc', 'fastai/tabular/core.py'),
'fastai.tabular.core.TabularProc.name': ( 'tabular.core.html#tabularproc.name',
'fastai/tabular/core.py'),
'fastai.tabular.core.TabularProc.setup': ( 'tabular.core.html#tabularproc.setup',
'fastai/tabular/core.py'),
'fastai.tabular.core._TabIloc': ('tabular.core.html#_tabiloc', 'fastai/tabular/core.py'),
'fastai.tabular.core._TabIloc.__getitem__': ( 'tabular.core.html#_tabiloc.__getitem__',
'fastai/tabular/core.py'),
'fastai.tabular.core._TabIloc.__init__': ( 'tabular.core.html#_tabiloc.__init__',
'fastai/tabular/core.py'),
'fastai.tabular.core._add_prop': ('tabular.core.html#_add_prop', 'fastai/tabular/core.py'),
'fastai.tabular.core._apply_cats': ('tabular.core.html#_apply_cats', 'fastai/tabular/core.py'),
'fastai.tabular.core._decode_cats': ('tabular.core.html#_decode_cats', 'fastai/tabular/core.py'),
'fastai.tabular.core._get_elapsed': ('tabular.core.html#_get_elapsed', 'fastai/tabular/core.py'),
'fastai.tabular.core._maybe_expand': ('tabular.core.html#_maybe_expand', 'fastai/tabular/core.py'),
'fastai.tabular.core.add_datepart': ('tabular.core.html#add_datepart', 'fastai/tabular/core.py'),
'fastai.tabular.core.add_elapsed_times': ( 'tabular.core.html#add_elapsed_times',
'fastai/tabular/core.py'),
'fastai.tabular.core.cont_cat_split': ('tabular.core.html#cont_cat_split', 'fastai/tabular/core.py'),
'fastai.tabular.core.decodes': ('tabular.core.html#decodes', 'fastai/tabular/core.py'),
'fastai.tabular.core.df_shrink': ('tabular.core.html#df_shrink', 'fastai/tabular/core.py'),
'fastai.tabular.core.df_shrink_dtypes': ( 'tabular.core.html#df_shrink_dtypes',
'fastai/tabular/core.py'),
'fastai.tabular.core.encodes': ('tabular.core.html#encodes', 'fastai/tabular/core.py'),
'fastai.tabular.core.make_date': ('tabular.core.html#make_date', 'fastai/tabular/core.py'),
'fastai.tabular.core.setups': ('tabular.core.html#setups', 'fastai/tabular/core.py'),
'fastai.tabular.core.show_batch': ('tabular.core.html#show_batch', 'fastai/tabular/core.py')},
'fastai.tabular.data': { 'fastai.tabular.data.TabularDataLoaders': ( 'tabular.data.html#tabulardataloaders',
'fastai/tabular/data.py'),
'fastai.tabular.data.TabularDataLoaders.from_csv': ( 'tabular.data.html#tabulardataloaders.from_csv',
'fastai/tabular/data.py'),
'fastai.tabular.data.TabularDataLoaders.from_df': ( 'tabular.data.html#tabulardataloaders.from_df',
'fastai/tabular/data.py'),
'fastai.tabular.data.TabularDataLoaders.test_dl': ( 'tabular.data.html#tabulardataloaders.test_dl',
'fastai/tabular/data.py')},
'fastai.tabular.learner': { 'fastai.tabular.learner.TabularLearner': ( 'tabular.learner.html#tabularlearner',
'fastai/tabular/learner.py'),
'fastai.tabular.learner.TabularLearner.predict': ( 'tabular.learner.html#tabularlearner.predict',
'fastai/tabular/learner.py'),
'fastai.tabular.learner.show_results': ( 'tabular.learner.html#show_results',
'fastai/tabular/learner.py'),
'fastai.tabular.learner.tabular_learner': ( 'tabular.learner.html#tabular_learner',
'fastai/tabular/learner.py')},
'fastai.tabular.model': { 'fastai.tabular.model.TabularModel': ('tabular.model.html#tabularmodel', 'fastai/tabular/model.py'),
'fastai.tabular.model.TabularModel.__init__': ( 'tabular.model.html#tabularmodel.__init__',
'fastai/tabular/model.py'),
'fastai.tabular.model.TabularModel.forward': ( 'tabular.model.html#tabularmodel.forward',
'fastai/tabular/model.py'),
'fastai.tabular.model._one_emb_sz': ('tabular.model.html#_one_emb_sz', 'fastai/tabular/model.py'),
'fastai.tabular.model.emb_sz_rule': ('tabular.model.html#emb_sz_rule', 'fastai/tabular/model.py'),
'fastai.tabular.model.get_emb_sz': ('tabular.model.html#get_emb_sz', 'fastai/tabular/model.py'),
'fastai.tabular.model.tabular_config': ( 'tabular.model.html#tabular_config',
'fastai/tabular/model.py')},
'fastai.test_utils': { 'fastai.test_utils.RegModel': ('test_utils.html#regmodel', 'fastai/test_utils.py'),
'fastai.test_utils.RegModel.__init__': ('test_utils.html#regmodel.__init__', 'fastai/test_utils.py'),
'fastai.test_utils.RegModel.forward': ('test_utils.html#regmodel.forward', 'fastai/test_utils.py'),
'fastai.test_utils.VerboseCallback': ('test_utils.html#verbosecallback', 'fastai/test_utils.py'),
'fastai.test_utils.VerboseCallback.__call__': ( 'test_utils.html#verbosecallback.__call__',
'fastai/test_utils.py'),
'fastai.test_utils.get_env': ('test_utils.html#get_env', 'fastai/test_utils.py'),
'fastai.test_utils.nvidia_mem': ('test_utils.html#nvidia_mem', 'fastai/test_utils.py'),
'fastai.test_utils.nvidia_smi': ('test_utils.html#nvidia_smi', 'fastai/test_utils.py'),
'fastai.test_utils.show_install': ('test_utils.html#show_install', 'fastai/test_utils.py'),
'fastai.test_utils.synth_dbunch': ('test_utils.html#synth_dbunch', 'fastai/test_utils.py'),
'fastai.test_utils.synth_learner': ('test_utils.html#synth_learner', 'fastai/test_utils.py'),
'fastai.test_utils.try_import': ('test_utils.html#try_import', 'fastai/test_utils.py')},
'fastai.text.all': {},
'fastai.text.core': { 'fastai.text.core.BaseTokenizer': ('text.core.html#basetokenizer', 'fastai/text/core.py'),
'fastai.text.core.BaseTokenizer.__call__': ( 'text.core.html#basetokenizer.__call__',
'fastai/text/core.py'),
'fastai.text.core.BaseTokenizer.__init__': ( 'text.core.html#basetokenizer.__init__',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer': ( 'text.core.html#sentencepiecetokenizer',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer.__call__': ( 'text.core.html#sentencepiecetokenizer.__call__',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer.__init__': ( 'text.core.html#sentencepiecetokenizer.__init__',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer._get_vocab_sz': ( 'text.core.html#sentencepiecetokenizer._get_vocab_sz',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer.setup': ( 'text.core.html#sentencepiecetokenizer.setup',
'fastai/text/core.py'),
'fastai.text.core.SentencePieceTokenizer.train': ( 'text.core.html#sentencepiecetokenizer.train',
'fastai/text/core.py'),
'fastai.text.core.SpacyTokenizer': ('text.core.html#spacytokenizer', 'fastai/text/core.py'),
'fastai.text.core.SpacyTokenizer.__call__': ( 'text.core.html#spacytokenizer.__call__',
'fastai/text/core.py'),
'fastai.text.core.SpacyTokenizer.__init__': ( 'text.core.html#spacytokenizer.__init__',
'fastai/text/core.py'),
'fastai.text.core.TokenizeWithRules': ('text.core.html#tokenizewithrules', 'fastai/text/core.py'),
'fastai.text.core.TokenizeWithRules.__call__': ( 'text.core.html#tokenizewithrules.__call__',
'fastai/text/core.py'),
'fastai.text.core.TokenizeWithRules.__init__': ( 'text.core.html#tokenizewithrules.__init__',
'fastai/text/core.py'),
'fastai.text.core.Tokenizer': ('text.core.html#tokenizer', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.__init__': ('text.core.html#tokenizer.__init__', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer._tokenize1': ('text.core.html#tokenizer._tokenize1', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.decodes': ('text.core.html#tokenizer.decodes', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.encodes': ('text.core.html#tokenizer.encodes', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.from_df': ('text.core.html#tokenizer.from_df', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.from_folder': ('text.core.html#tokenizer.from_folder', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.get_lengths': ('text.core.html#tokenizer.get_lengths', 'fastai/text/core.py'),
'fastai.text.core.Tokenizer.setups': ('text.core.html#tokenizer.setups', 'fastai/text/core.py'),
'fastai.text.core._join_texts': ('text.core.html#_join_texts', 'fastai/text/core.py'),
'fastai.text.core._tokenize_files': ('text.core.html#_tokenize_files', 'fastai/text/core.py'),
'fastai.text.core.fix_html': ('text.core.html#fix_html', 'fastai/text/core.py'),
'fastai.text.core.load_tokenized_csv': ('text.core.html#load_tokenized_csv', 'fastai/text/core.py'),
'fastai.text.core.lowercase': ('text.core.html#lowercase', 'fastai/text/core.py'),
'fastai.text.core.parallel_tokenize': ('text.core.html#parallel_tokenize', 'fastai/text/core.py'),
'fastai.text.core.replace_all_caps': ('text.core.html#replace_all_caps', 'fastai/text/core.py'),
'fastai.text.core.replace_maj': ('text.core.html#replace_maj', 'fastai/text/core.py'),
'fastai.text.core.replace_rep': ('text.core.html#replace_rep', 'fastai/text/core.py'),
'fastai.text.core.replace_space': ('text.core.html#replace_space', 'fastai/text/core.py'),
'fastai.text.core.replace_wrep': ('text.core.html#replace_wrep', 'fastai/text/core.py'),
'fastai.text.core.rm_useless_spaces': ('text.core.html#rm_useless_spaces', 'fastai/text/core.py'),
'fastai.text.core.spec_add_spaces': ('text.core.html#spec_add_spaces', 'fastai/text/core.py'),
'fastai.text.core.tokenize1': ('text.core.html#tokenize1', 'fastai/text/core.py'),
'fastai.text.core.tokenize_csv': ('text.core.html#tokenize_csv', 'fastai/text/core.py'),
'fastai.text.core.tokenize_df': ('text.core.html#tokenize_df', 'fastai/text/core.py'),
'fastai.text.core.tokenize_files': ('text.core.html#tokenize_files', 'fastai/text/core.py'),
'fastai.text.core.tokenize_folder': ('text.core.html#tokenize_folder', 'fastai/text/core.py'),
'fastai.text.core.tokenize_texts': ('text.core.html#tokenize_texts', 'fastai/text/core.py')},
'fastai.text.data': { 'fastai.text.data.LMDataLoader': ('text.data.html#lmdataloader', 'fastai/text/data.py'),
'fastai.text.data.LMDataLoader.__init__': ('text.data.html#lmdataloader.__init__', 'fastai/text/data.py'),
'fastai.text.data.LMDataLoader.create_item': ( 'text.data.html#lmdataloader.create_item',
'fastai/text/data.py'),
'fastai.text.data.LMDataLoader.make_chunks': ( 'text.data.html#lmdataloader.make_chunks',
'fastai/text/data.py'),
'fastai.text.data.LMDataLoader.new': ('text.data.html#lmdataloader.new', 'fastai/text/data.py'),
'fastai.text.data.LMDataLoader.shuffle_fn': ( 'text.data.html#lmdataloader.shuffle_fn',
'fastai/text/data.py'),
'fastai.text.data.LMTensorText': ('text.data.html#lmtensortext', 'fastai/text/data.py'),
'fastai.text.data.Numericalize': ('text.data.html#numericalize', 'fastai/text/data.py'),
'fastai.text.data.Numericalize.__init__': ('text.data.html#numericalize.__init__', 'fastai/text/data.py'),
'fastai.text.data.Numericalize.decodes': ('text.data.html#numericalize.decodes', 'fastai/text/data.py'),
'fastai.text.data.Numericalize.encodes': ('text.data.html#numericalize.encodes', 'fastai/text/data.py'),
'fastai.text.data.Numericalize.setups': ('text.data.html#numericalize.setups', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk': ('text.data.html#pad_chunk', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk.__call__': ('text.data.html#pad_chunk.__call__', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk.__init__': ('text.data.html#pad_chunk.__init__', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk.before_call': ('text.data.html#pad_chunk.before_call', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk.decodes': ('text.data.html#pad_chunk.decodes', 'fastai/text/data.py'),
'fastai.text.data.Pad_Chunk.encodes': ('text.data.html#pad_chunk.encodes', 'fastai/text/data.py'),
'fastai.text.data.Pad_Input': ('text.data.html#pad_input', 'fastai/text/data.py'),
'fastai.text.data.Pad_Input.decodes': ('text.data.html#pad_input.decodes', 'fastai/text/data.py'),
'fastai.text.data.Pad_Input.encodes': ('text.data.html#pad_input.encodes', 'fastai/text/data.py'),
'fastai.text.data.SortedDL': ('text.data.html#sorteddl', 'fastai/text/data.py'),
'fastai.text.data.SortedDL.__init__': ('text.data.html#sorteddl.__init__', 'fastai/text/data.py'),
'fastai.text.data.SortedDL.get_idxs': ('text.data.html#sorteddl.get_idxs', 'fastai/text/data.py'),
'fastai.text.data.SortedDL.new': ('text.data.html#sorteddl.new', 'fastai/text/data.py'),
'fastai.text.data.SortedDL.shuffle_fn': ('text.data.html#sorteddl.shuffle_fn', 'fastai/text/data.py'),
'fastai.text.data.TensorText': ('text.data.html#tensortext', 'fastai/text/data.py'),
'fastai.text.data.TextBlock': ('text.data.html#textblock', 'fastai/text/data.py'),
'fastai.text.data.TextBlock.__init__': ('text.data.html#textblock.__init__', 'fastai/text/data.py'),
'fastai.text.data.TextBlock.from_df': ('text.data.html#textblock.from_df', 'fastai/text/data.py'),
'fastai.text.data.TextBlock.from_folder': ('text.data.html#textblock.from_folder', 'fastai/text/data.py'),
'fastai.text.data.TextDataLoaders': ('text.data.html#textdataloaders', 'fastai/text/data.py'),
'fastai.text.data.TextDataLoaders.from_csv': ( 'text.data.html#textdataloaders.from_csv',
'fastai/text/data.py'),
'fastai.text.data.TextDataLoaders.from_df': ( 'text.data.html#textdataloaders.from_df',
'fastai/text/data.py'),
'fastai.text.data.TextDataLoaders.from_folder': ( 'text.data.html#textdataloaders.from_folder',
'fastai/text/data.py'),
'fastai.text.data._default_sort': ('text.data.html#_default_sort', 'fastai/text/data.py'),
'fastai.text.data._get_lengths': ('text.data.html#_get_lengths', 'fastai/text/data.py'),
'fastai.text.data._get_tokenizer': ('text.data.html#_get_tokenizer', 'fastai/text/data.py'),
'fastai.text.data._maybe_first': ('text.data.html#_maybe_first', 'fastai/text/data.py'),
'fastai.text.data.make_vocab': ('text.data.html#make_vocab', 'fastai/text/data.py'),
'fastai.text.data.pad_chunk': ('text.data.html#pad_chunk', 'fastai/text/data.py'),
'fastai.text.data.pad_input_chunk': ('text.data.html#pad_input_chunk', 'fastai/text/data.py'),
'fastai.text.data.reverse_text': ('text.data.html#reverse_text', 'fastai/text/data.py'),
'fastai.text.data.show_batch': ('text.data.html#show_batch', 'fastai/text/data.py')},
'fastai.text.learner': { 'fastai.text.learner.LMLearner': ('text.learner.html#lmlearner', 'fastai/text/learner.py'),
'fastai.text.learner.LMLearner.get_preds': ( 'text.learner.html#lmlearner.get_preds',
'fastai/text/learner.py'),
'fastai.text.learner.LMLearner.predict': ( 'text.learner.html#lmlearner.predict',
'fastai/text/learner.py'),
'fastai.text.learner.TextLearner': ('text.learner.html#textlearner', 'fastai/text/learner.py'),
'fastai.text.learner.TextLearner.__init__': ( 'text.learner.html#textlearner.__init__',
'fastai/text/learner.py'),
'fastai.text.learner.TextLearner.load': ( 'text.learner.html#textlearner.load',
'fastai/text/learner.py'),
'fastai.text.learner.TextLearner.load_encoder': ( 'text.learner.html#textlearner.load_encoder',
'fastai/text/learner.py'),
'fastai.text.learner.TextLearner.load_pretrained': ( 'text.learner.html#textlearner.load_pretrained',
'fastai/text/learner.py'),
'fastai.text.learner.TextLearner.save_encoder': ( 'text.learner.html#textlearner.save_encoder',
'fastai/text/learner.py'),
'fastai.text.learner._get_text_vocab': ('text.learner.html#_get_text_vocab', 'fastai/text/learner.py'),
'fastai.text.learner._rm_module': ('text.learner.html#_rm_module', 'fastai/text/learner.py'),
'fastai.text.learner.clean_raw_keys': ('text.learner.html#clean_raw_keys', 'fastai/text/learner.py'),
'fastai.text.learner.decode_spec_tokens': ( 'text.learner.html#decode_spec_tokens',
'fastai/text/learner.py'),
'fastai.text.learner.language_model_learner': ( 'text.learner.html#language_model_learner',
'fastai/text/learner.py'),
'fastai.text.learner.load_ignore_keys': ( 'text.learner.html#load_ignore_keys',
'fastai/text/learner.py'),
'fastai.text.learner.load_model_text': ('text.learner.html#load_model_text', 'fastai/text/learner.py'),
'fastai.text.learner.match_embeds': ('text.learner.html#match_embeds', 'fastai/text/learner.py'),
'fastai.text.learner.plot_top_losses': ('text.learner.html#plot_top_losses', 'fastai/text/learner.py'),
'fastai.text.learner.show_results': ('text.learner.html#show_results', 'fastai/text/learner.py'),
'fastai.text.learner.text_classifier_learner': ( 'text.learner.html#text_classifier_learner',
'fastai/text/learner.py')},
'fastai.text.models.awdlstm': { 'fastai.text.models.awdlstm.AWD_LSTM': ( 'text.models.awdlstm.html#awd_lstm',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM.__init__': ( 'text.models.awdlstm.html#awd_lstm.__init__',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM._change_hidden': ( 'text.models.awdlstm.html#awd_lstm._change_hidden',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM._change_one_hidden': ( 'text.models.awdlstm.html#awd_lstm._change_one_hidden',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM._one_hidden': ( 'text.models.awdlstm.html#awd_lstm._one_hidden',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM._one_rnn': ( 'text.models.awdlstm.html#awd_lstm._one_rnn',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM.forward': ( 'text.models.awdlstm.html#awd_lstm.forward',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.AWD_LSTM.reset': ( 'text.models.awdlstm.html#awd_lstm.reset',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.EmbeddingDropout': ( 'text.models.awdlstm.html#embeddingdropout',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.EmbeddingDropout.__init__': ( 'text.models.awdlstm.html#embeddingdropout.__init__',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.EmbeddingDropout.forward': ( 'text.models.awdlstm.html#embeddingdropout.forward',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.RNNDropout': ( 'text.models.awdlstm.html#rnndropout',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.RNNDropout.__init__': ( 'text.models.awdlstm.html#rnndropout.__init__',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.RNNDropout.forward': ( 'text.models.awdlstm.html#rnndropout.forward',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout': ( 'text.models.awdlstm.html#weightdropout',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout.__init__': ( 'text.models.awdlstm.html#weightdropout.__init__',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout._do_nothing': ( 'text.models.awdlstm.html#weightdropout._do_nothing',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout._setweights': ( 'text.models.awdlstm.html#weightdropout._setweights',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout.forward': ( 'text.models.awdlstm.html#weightdropout.forward',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.WeightDropout.reset': ( 'text.models.awdlstm.html#weightdropout.reset',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.awd_lstm_clas_split': ( 'text.models.awdlstm.html#awd_lstm_clas_split',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.awd_lstm_lm_split': ( 'text.models.awdlstm.html#awd_lstm_lm_split',
'fastai/text/models/awdlstm.py'),
'fastai.text.models.awdlstm.dropout_mask': ( 'text.models.awdlstm.html#dropout_mask',
'fastai/text/models/awdlstm.py')},
'fastai.text.models.core': { 'fastai.text.models.core.LinearDecoder': ( 'text.models.core.html#lineardecoder',
'fastai/text/models/core.py'),
'fastai.text.models.core.LinearDecoder.__init__': ( 'text.models.core.html#lineardecoder.__init__',
'fastai/text/models/core.py'),
'fastai.text.models.core.LinearDecoder.forward': ( 'text.models.core.html#lineardecoder.forward',
'fastai/text/models/core.py'),
'fastai.text.models.core.PoolingLinearClassifier': ( 'text.models.core.html#poolinglinearclassifier',
'fastai/text/models/core.py'),
'fastai.text.models.core.PoolingLinearClassifier.__init__': ( 'text.models.core.html#poolinglinearclassifier.__init__',
'fastai/text/models/core.py'),
'fastai.text.models.core.PoolingLinearClassifier.forward': ( 'text.models.core.html#poolinglinearclassifier.forward',
'fastai/text/models/core.py'),
'fastai.text.models.core.SentenceEncoder': ( 'text.models.core.html#sentenceencoder',
'fastai/text/models/core.py'),
'fastai.text.models.core.SentenceEncoder.__init__': ( 'text.models.core.html#sentenceencoder.__init__',
'fastai/text/models/core.py'),
'fastai.text.models.core.SentenceEncoder.forward': ( 'text.models.core.html#sentenceencoder.forward',
'fastai/text/models/core.py'),
'fastai.text.models.core.SentenceEncoder.reset': ( 'text.models.core.html#sentenceencoder.reset',
'fastai/text/models/core.py'),
'fastai.text.models.core.SequentialRNN': ( 'text.models.core.html#sequentialrnn',
'fastai/text/models/core.py'),
'fastai.text.models.core.SequentialRNN.reset': ( 'text.models.core.html#sequentialrnn.reset',
'fastai/text/models/core.py'),
'fastai.text.models.core._pad_tensor': ( 'text.models.core.html#_pad_tensor',
'fastai/text/models/core.py'),
'fastai.text.models.core.get_language_model': ( 'text.models.core.html#get_language_model',
'fastai/text/models/core.py'),
'fastai.text.models.core.get_text_classifier': ( 'text.models.core.html#get_text_classifier',
'fastai/text/models/core.py'),
'fastai.text.models.core.masked_concat_pool': ( 'text.models.core.html#masked_concat_pool',
'fastai/text/models/core.py')},
'fastai.torch_basics': {},
'fastai.torch_core': { 'fastai.torch_core.ArrayBase': ('torch_core.html#arraybase', 'fastai/torch_core.py'),
'fastai.torch_core.ArrayBase._before_cast': ( 'torch_core.html#arraybase._before_cast',
'fastai/torch_core.py'),
'fastai.torch_core.ArrayImage': ('torch_core.html#arrayimage', 'fastai/torch_core.py'),
'fastai.torch_core.ArrayImageBW': ('torch_core.html#arrayimagebw', 'fastai/torch_core.py'),
'fastai.torch_core.ArrayImageBase': ('torch_core.html#arrayimagebase', 'fastai/torch_core.py'),
'fastai.torch_core.ArrayImageBase.show': ('torch_core.html#arrayimagebase.show', 'fastai/torch_core.py'),
'fastai.torch_core.ArrayMask': ('torch_core.html#arraymask', 'fastai/torch_core.py'),
'fastai.torch_core.Chunks': ('torch_core.html#chunks', 'fastai/torch_core.py'),
'fastai.torch_core.Chunks.__getitem__': ('torch_core.html#chunks.__getitem__', 'fastai/torch_core.py'),
'fastai.torch_core.Chunks.__init__': ('torch_core.html#chunks.__init__', 'fastai/torch_core.py'),
'fastai.torch_core.Chunks.doc_idx': ('torch_core.html#chunks.doc_idx', 'fastai/torch_core.py'),
'fastai.torch_core.Chunks.getslice': ('torch_core.html#chunks.getslice', 'fastai/torch_core.py'),
'fastai.torch_core.L.cat': ('torch_core.html#l.cat', 'fastai/torch_core.py'),
'fastai.torch_core.L.stack': ('torch_core.html#l.stack', 'fastai/torch_core.py'),
'fastai.torch_core.L.tensored': ('torch_core.html#l.tensored', 'fastai/torch_core.py'),
'fastai.torch_core.Module': ('torch_core.html#module', 'fastai/torch_core.py'),
'fastai.torch_core.Module.__init__': ('torch_core.html#module.__init__', 'fastai/torch_core.py'),
'fastai.torch_core.Module.__pre_init__': ('torch_core.html#module.__pre_init__', 'fastai/torch_core.py'),
'fastai.torch_core.Path.load_array': ('torch_core.html#path.load_array', 'fastai/torch_core.py'),
'fastai.torch_core.Path.save_array': ('torch_core.html#path.save_array', 'fastai/torch_core.py'),
'fastai.torch_core.ShowTitle': ('torch_core.html#showtitle', 'fastai/torch_core.py'),
'fastai.torch_core.ShowTitle.show': ('torch_core.html#showtitle.show', 'fastai/torch_core.py'),
'fastai.torch_core.Tensor.__array_eq__': ('torch_core.html#tensor.__array_eq__', 'fastai/torch_core.py'),
'fastai.torch_core.Tensor.as_subclass': ('torch_core.html#tensor.as_subclass', 'fastai/torch_core.py'),
'fastai.torch_core.Tensor.interp_1d': ('torch_core.html#tensor.interp_1d', 'fastai/torch_core.py'),
'fastai.torch_core.Tensor.pca': ('torch_core.html#tensor.pca', 'fastai/torch_core.py'),
'fastai.torch_core.Tensor.set_meta': ('torch_core.html#tensor.set_meta', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase': ('torch_core.html#tensorbase', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.__new__': ('torch_core.html#tensorbase.__new__', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.__reduce_ex__': ( 'torch_core.html#tensorbase.__reduce_ex__',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.__repr__': ('torch_core.html#tensorbase.__repr__', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.__torch_function__': ( 'torch_core.html#tensorbase.__torch_function__',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase._before_cast': ( 'torch_core.html#tensorbase._before_cast',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.clone': ('torch_core.html#tensorbase.clone', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.new': ('torch_core.html#tensorbase.new', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.new_empty': ( 'torch_core.html#tensorbase.new_empty',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.new_ones': ('torch_core.html#tensorbase.new_ones', 'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.new_tensor': ( 'torch_core.html#tensorbase.new_tensor',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.register_func': ( 'torch_core.html#tensorbase.register_func',
'fastai/torch_core.py'),
'fastai.torch_core.TensorBase.requires_grad_': ( 'torch_core.html#tensorbase.requires_grad_',
'fastai/torch_core.py'),
'fastai.torch_core.TensorCategory': ('torch_core.html#tensorcategory', 'fastai/torch_core.py'),
'fastai.torch_core.TensorFlowField': ('torch_core.html#tensorflowfield', 'fastai/torch_core.py'),
'fastai.torch_core.TensorImage': ('torch_core.html#tensorimage', 'fastai/torch_core.py'),
'fastai.torch_core.TensorImageBW': ('torch_core.html#tensorimagebw', 'fastai/torch_core.py'),
'fastai.torch_core.TensorImageBase': ('torch_core.html#tensorimagebase', 'fastai/torch_core.py'),
'fastai.torch_core.TensorImageBase.show': ( 'torch_core.html#tensorimagebase.show',
'fastai/torch_core.py'),
'fastai.torch_core.TensorMask': ('torch_core.html#tensormask', 'fastai/torch_core.py'),
'fastai.torch_core.TensorMask.show': ('torch_core.html#tensormask.show', 'fastai/torch_core.py'),
'fastai.torch_core.TensorMultiCategory': ('torch_core.html#tensormulticategory', 'fastai/torch_core.py'),
'fastai.torch_core.TitledFloat': ('torch_core.html#titledfloat', 'fastai/torch_core.py'),
'fastai.torch_core.TitledFloat.show': ('torch_core.html#titledfloat.show', 'fastai/torch_core.py'),
'fastai.torch_core.TitledInt': ('torch_core.html#titledint', 'fastai/torch_core.py'),
'fastai.torch_core.TitledInt.show': ('torch_core.html#titledint.show', 'fastai/torch_core.py'),
'fastai.torch_core.TitledStr': ('torch_core.html#titledstr', 'fastai/torch_core.py'),
'fastai.torch_core.TitledStr.show': ('torch_core.html#titledstr.show', 'fastai/torch_core.py'),
'fastai.torch_core.TitledStr.truncate': ('torch_core.html#titledstr.truncate', 'fastai/torch_core.py'),
'fastai.torch_core.TitledTensorScalar': ('torch_core.html#titledtensorscalar', 'fastai/torch_core.py'),
'fastai.torch_core.TitledTensorScalar.show': ( 'torch_core.html#titledtensorscalar.show',
'fastai/torch_core.py'),
'fastai.torch_core.TitledTuple': ('torch_core.html#titledtuple', 'fastai/torch_core.py'),
'fastai.torch_core.TitledTuple.show': ('torch_core.html#titledtuple.show', 'fastai/torch_core.py'),
'fastai.torch_core._array2tensor': ('torch_core.html#_array2tensor', 'fastai/torch_core.py'),
'fastai.torch_core._comp_filter': ('torch_core.html#_comp_filter', 'fastai/torch_core.py'),
'fastai.torch_core._fa_rebuild_qtensor': ('torch_core.html#_fa_rebuild_qtensor', 'fastai/torch_core.py'),
'fastai.torch_core._fa_rebuild_tensor': ('torch_core.html#_fa_rebuild_tensor', 'fastai/torch_core.py'),
'fastai.torch_core._fig_bounds': ('torch_core.html#_fig_bounds', 'fastai/torch_core.py'),
'fastai.torch_core._find_args': ('torch_core.html#_find_args', 'fastai/torch_core.py'),
'fastai.torch_core._has_mps': ('torch_core.html#_has_mps', 'fastai/torch_core.py'),
'fastai.torch_core._rebuild_from_type': ('torch_core.html#_rebuild_from_type', 'fastai/torch_core.py'),
'fastai.torch_core._torch_handled': ('torch_core.html#_torch_handled', 'fastai/torch_core.py'),
'fastai.torch_core.apply': ('torch_core.html#apply', 'fastai/torch_core.py'),
'fastai.torch_core.apply_init': ('torch_core.html#apply_init', 'fastai/torch_core.py'),
'fastai.torch_core.apply_leaf': ('torch_core.html#apply_leaf', 'fastai/torch_core.py'),
'fastai.torch_core.base_doc': ('torch_core.html#base_doc', 'fastai/torch_core.py'),
'fastai.torch_core.batch_to_samples': ('torch_core.html#batch_to_samples', 'fastai/torch_core.py'),
'fastai.torch_core.concat': ('torch_core.html#concat', 'fastai/torch_core.py'),
'fastai.torch_core.cond_init': ('torch_core.html#cond_init', 'fastai/torch_core.py'),
'fastai.torch_core.default_device': ('torch_core.html#default_device', 'fastai/torch_core.py'),
'fastai.torch_core.display_df': ('torch_core.html#display_df', 'fastai/torch_core.py'),
'fastai.torch_core.distrib_barrier': ('torch_core.html#distrib_barrier', 'fastai/torch_core.py'),
'fastai.torch_core.doc': ('torch_core.html#doc', 'fastai/torch_core.py'),
'fastai.torch_core.find_bs': ('torch_core.html#find_bs', 'fastai/torch_core.py'),
'fastai.torch_core.find_device': ('torch_core.html#find_device', 'fastai/torch_core.py'),
'fastai.torch_core.flatten_check': ('torch_core.html#flatten_check', 'fastai/torch_core.py'),
'fastai.torch_core.get_empty_df': ('torch_core.html#get_empty_df', 'fastai/torch_core.py'),
'fastai.torch_core.get_first': ('torch_core.html#get_first', 'fastai/torch_core.py'),
'fastai.torch_core.get_model': ('torch_core.html#get_model', 'fastai/torch_core.py'),
'fastai.torch_core.get_random_states': ('torch_core.html#get_random_states', 'fastai/torch_core.py'),
'fastai.torch_core.grad_module': ('torch_core.html#grad_module', 'fastai/torch_core.py'),
'fastai.torch_core.init_default': ('torch_core.html#init_default', 'fastai/torch_core.py'),
'fastai.torch_core.ismin_torch': ('torch_core.html#ismin_torch', 'fastai/torch_core.py'),
'fastai.torch_core.item_find': ('torch_core.html#item_find', 'fastai/torch_core.py'),
'fastai.torch_core.logit': ('torch_core.html#logit', 'fastai/torch_core.py'),
'fastai.torch_core.make_cross_image': ('torch_core.html#make_cross_image', 'fastai/torch_core.py'),
'fastai.torch_core.maybe_gather': ('torch_core.html#maybe_gather', 'fastai/torch_core.py'),
'fastai.torch_core.nested_reorder': ('torch_core.html#nested_reorder', 'fastai/torch_core.py'),
'fastai.torch_core.no_random': ('torch_core.html#no_random', 'fastai/torch_core.py'),
'fastai.torch_core.norm_bias_params': ('torch_core.html#norm_bias_params', 'fastai/torch_core.py'),
'fastai.torch_core.notmax_torch': ('torch_core.html#notmax_torch', 'fastai/torch_core.py'),
'fastai.torch_core.np_func': ('torch_core.html#np_func', 'fastai/torch_core.py'),
'fastai.torch_core.num_distrib': ('torch_core.html#num_distrib', 'fastai/torch_core.py'),
'fastai.torch_core.one_hot': ('torch_core.html#one_hot', 'fastai/torch_core.py'),
'fastai.torch_core.one_hot_decode': ('torch_core.html#one_hot_decode', 'fastai/torch_core.py'),
'fastai.torch_core.one_param': ('torch_core.html#one_param', 'fastai/torch_core.py'),
'fastai.torch_core.params': ('torch_core.html#params', 'fastai/torch_core.py'),
'fastai.torch_core.pd.DataFrame.__init__': ( 'torch_core.html#pd.dataframe.__init__',
'fastai/torch_core.py'),
'fastai.torch_core.rank_distrib': ('torch_core.html#rank_distrib', 'fastai/torch_core.py'),
'fastai.torch_core.requires_grad': ('torch_core.html#requires_grad', 'fastai/torch_core.py'),
'fastai.torch_core.script_bwd': ('torch_core.html#script_bwd', 'fastai/torch_core.py'),
'fastai.torch_core.script_fwd': ('torch_core.html#script_fwd', 'fastai/torch_core.py'),
'fastai.torch_core.script_save_ctx': ('torch_core.html#script_save_ctx', 'fastai/torch_core.py'),
'fastai.torch_core.script_use_ctx': ('torch_core.html#script_use_ctx', 'fastai/torch_core.py'),
'fastai.torch_core.set_random_states': ('torch_core.html#set_random_states', 'fastai/torch_core.py'),
'fastai.torch_core.set_seed': ('torch_core.html#set_seed', 'fastai/torch_core.py'),
'fastai.torch_core.setup_cuda': ('torch_core.html#setup_cuda', 'fastai/torch_core.py'),
'fastai.torch_core.show_image': ('torch_core.html#show_image', 'fastai/torch_core.py'),
'fastai.torch_core.show_image_batch': ('torch_core.html#show_image_batch', 'fastai/torch_core.py'),
'fastai.torch_core.show_images': ('torch_core.html#show_images', 'fastai/torch_core.py'),
'fastai.torch_core.show_title': ('torch_core.html#show_title', 'fastai/torch_core.py'),
'fastai.torch_core.show_titled_image': ('torch_core.html#show_titled_image', 'fastai/torch_core.py'),
'fastai.torch_core.subplots': ('torch_core.html#subplots', 'fastai/torch_core.py'),
'fastai.torch_core.tensor': ('torch_core.html#tensor', 'fastai/torch_core.py'),
'fastai.torch_core.to_concat': ('torch_core.html#to_concat', 'fastai/torch_core.py'),
'fastai.torch_core.to_cpu': ('torch_core.html#to_cpu', 'fastai/torch_core.py'),
'fastai.torch_core.to_detach': ('torch_core.html#to_detach', 'fastai/torch_core.py'),
'fastai.torch_core.to_device': ('torch_core.html#to_device', 'fastai/torch_core.py'),
'fastai.torch_core.to_float': ('torch_core.html#to_float', 'fastai/torch_core.py'),
'fastai.torch_core.to_half': ('torch_core.html#to_half', 'fastai/torch_core.py'),
'fastai.torch_core.to_np': ('torch_core.html#to_np', 'fastai/torch_core.py'),
'fastai.torch_core.trainable_params': ('torch_core.html#trainable_params', 'fastai/torch_core.py'),
'fastai.torch_core.unsqueeze': ('torch_core.html#unsqueeze', 'fastai/torch_core.py'),
'fastai.torch_core.unsqueeze_': ('torch_core.html#unsqueeze_', 'fastai/torch_core.py')},
'fastai.torch_imports': {},
'fastai.vision.all': {},
'fastai.vision.augment': { 'fastai.vision.augment.AffineCoordTfm': ( 'vision.augment.html#affinecoordtfm',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm.__init__': ( 'vision.augment.html#affinecoordtfm.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm._encode': ( 'vision.augment.html#affinecoordtfm._encode',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm._get_affine_mat': ( 'vision.augment.html#affinecoordtfm._get_affine_mat',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm.before_call': ( 'vision.augment.html#affinecoordtfm.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm.compose': ( 'vision.augment.html#affinecoordtfm.compose',
'fastai/vision/augment.py'),
'fastai.vision.augment.AffineCoordTfm.encodes': ( 'vision.augment.html#affinecoordtfm.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.Brightness': ('vision.augment.html#brightness', 'fastai/vision/augment.py'),
'fastai.vision.augment.Brightness.__init__': ( 'vision.augment.html#brightness.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Contrast': ('vision.augment.html#contrast', 'fastai/vision/augment.py'),
'fastai.vision.augment.Contrast.__init__': ( 'vision.augment.html#contrast.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.CropPad': ('vision.augment.html#croppad', 'fastai/vision/augment.py'),
'fastai.vision.augment.CropPad.__init__': ( 'vision.augment.html#croppad.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.CropPad.encodes': ( 'vision.augment.html#croppad.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicDihedral': ( 'vision.augment.html#deterministicdihedral',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicDihedral.__init__': ( 'vision.augment.html#deterministicdihedral.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicDraw': ( 'vision.augment.html#deterministicdraw',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicDraw.__call__': ( 'vision.augment.html#deterministicdraw.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicDraw.__init__': ( 'vision.augment.html#deterministicdraw.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicFlip': ( 'vision.augment.html#deterministicflip',
'fastai/vision/augment.py'),
'fastai.vision.augment.DeterministicFlip.__init__': ( 'vision.augment.html#deterministicflip.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Dihedral': ('vision.augment.html#dihedral', 'fastai/vision/augment.py'),
'fastai.vision.augment.Dihedral.__init__': ( 'vision.augment.html#dihedral.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.DihedralItem': ( 'vision.augment.html#dihedralitem',
'fastai/vision/augment.py'),
'fastai.vision.augment.DihedralItem.before_call': ( 'vision.augment.html#dihedralitem.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.DihedralItem.encodes': ( 'vision.augment.html#dihedralitem.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.Flip': ('vision.augment.html#flip', 'fastai/vision/augment.py'),
'fastai.vision.augment.Flip.__init__': ( 'vision.augment.html#flip.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.FlipItem': ('vision.augment.html#flipitem', 'fastai/vision/augment.py'),
'fastai.vision.augment.FlipItem.__init__': ( 'vision.augment.html#flipitem.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.FlipItem.encodes': ( 'vision.augment.html#flipitem.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.HSVTfm': ('vision.augment.html#hsvtfm', 'fastai/vision/augment.py'),
'fastai.vision.augment.HSVTfm.__init__': ( 'vision.augment.html#hsvtfm.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Hue': ('vision.augment.html#hue', 'fastai/vision/augment.py'),
'fastai.vision.augment.Hue.__init__': ( 'vision.augment.html#hue.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Image.Image._do_crop_pad': ( 'vision.augment.html#image.image._do_crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.Image.Image.crop_pad': ( 'vision.augment.html#image.image.crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.Image.Image.flip_lr': ( 'vision.augment.html#image.image.flip_lr',
'fastai/vision/augment.py'),
'fastai.vision.augment.LightingTfm': ('vision.augment.html#lightingtfm', 'fastai/vision/augment.py'),
'fastai.vision.augment.LightingTfm.__init__': ( 'vision.augment.html#lightingtfm.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.OldRandomCrop': ( 'vision.augment.html#oldrandomcrop',
'fastai/vision/augment.py'),
'fastai.vision.augment.OldRandomCrop.before_call': ( 'vision.augment.html#oldrandomcrop.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.PILImage.dihedral': ( 'vision.augment.html#pilimage.dihedral',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandTransform': ( 'vision.augment.html#randtransform',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandTransform.__call__': ( 'vision.augment.html#randtransform.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandTransform.__init__': ( 'vision.augment.html#randtransform.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandTransform.before_call': ( 'vision.augment.html#randtransform.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomCrop': ('vision.augment.html#randomcrop', 'fastai/vision/augment.py'),
'fastai.vision.augment.RandomCrop.__init__': ( 'vision.augment.html#randomcrop.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomCrop.before_call': ( 'vision.augment.html#randomcrop.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomCrop.encodes': ( 'vision.augment.html#randomcrop.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomErasing': ( 'vision.augment.html#randomerasing',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomErasing.__init__': ( 'vision.augment.html#randomerasing.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomErasing._bounds': ( 'vision.augment.html#randomerasing._bounds',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomErasing.encodes': ( 'vision.augment.html#randomerasing.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCrop': ( 'vision.augment.html#randomresizedcrop',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCrop.__init__': ( 'vision.augment.html#randomresizedcrop.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCrop.before_call': ( 'vision.augment.html#randomresizedcrop.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCrop.encodes': ( 'vision.augment.html#randomresizedcrop.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCropGPU': ( 'vision.augment.html#randomresizedcropgpu',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCropGPU.__init__': ( 'vision.augment.html#randomresizedcropgpu.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCropGPU._encode': ( 'vision.augment.html#randomresizedcropgpu._encode',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCropGPU.before_call': ( 'vision.augment.html#randomresizedcropgpu.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.RandomResizedCropGPU.encodes': ( 'vision.augment.html#randomresizedcropgpu.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.RatioResize': ('vision.augment.html#ratioresize', 'fastai/vision/augment.py'),
'fastai.vision.augment.RatioResize.__init__': ( 'vision.augment.html#ratioresize.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.RatioResize.encodes': ( 'vision.augment.html#ratioresize.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.Resize': ('vision.augment.html#resize', 'fastai/vision/augment.py'),
'fastai.vision.augment.Resize.__init__': ( 'vision.augment.html#resize.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Resize.before_call': ( 'vision.augment.html#resize.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.Resize.encodes': ( 'vision.augment.html#resize.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.Rotate': ('vision.augment.html#rotate', 'fastai/vision/augment.py'),
'fastai.vision.augment.Rotate.__init__': ( 'vision.augment.html#rotate.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Saturation': ('vision.augment.html#saturation', 'fastai/vision/augment.py'),
'fastai.vision.augment.Saturation.__init__': ( 'vision.augment.html#saturation.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.SpaceTfm': ('vision.augment.html#spacetfm', 'fastai/vision/augment.py'),
'fastai.vision.augment.SpaceTfm.__init__': ( 'vision.augment.html#spacetfm.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.SpaceTfm.before_call': ( 'vision.augment.html#spacetfm.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment.SpaceTfm.compose': ( 'vision.augment.html#spacetfm.compose',
'fastai/vision/augment.py'),
'fastai.vision.augment.SpaceTfm.encodes': ( 'vision.augment.html#spacetfm.encodes',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox._do_crop_pad': ( 'vision.augment.html#tensorbbox._do_crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.affine_coord': ( 'vision.augment.html#tensorbbox.affine_coord',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.crop_pad': ( 'vision.augment.html#tensorbbox.crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.dihedral': ( 'vision.augment.html#tensorbbox.dihedral',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.dihedral_batch': ( 'vision.augment.html#tensorbbox.dihedral_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.flip_batch': ( 'vision.augment.html#tensorbbox.flip_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.flip_lr': ( 'vision.augment.html#tensorbbox.flip_lr',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.rotate': ( 'vision.augment.html#tensorbbox.rotate',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.warp': ( 'vision.augment.html#tensorbbox.warp',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorBBox.zoom': ( 'vision.augment.html#tensorbbox.zoom',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.affine_coord': ( 'vision.augment.html#tensorimage.affine_coord',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.brightness': ( 'vision.augment.html#tensorimage.brightness',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.contrast': ( 'vision.augment.html#tensorimage.contrast',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.dihedral': ( 'vision.augment.html#tensorimage.dihedral',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.dihedral_batch': ( 'vision.augment.html#tensorimage.dihedral_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.flip_batch': ( 'vision.augment.html#tensorimage.flip_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.hsv': ( 'vision.augment.html#tensorimage.hsv',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.hue': ( 'vision.augment.html#tensorimage.hue',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.lighting': ( 'vision.augment.html#tensorimage.lighting',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.rotate': ( 'vision.augment.html#tensorimage.rotate',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.saturation': ( 'vision.augment.html#tensorimage.saturation',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.warp': ( 'vision.augment.html#tensorimage.warp',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImage.zoom': ( 'vision.augment.html#tensorimage.zoom',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorImageBase.flip_lr': ( 'vision.augment.html#tensorimagebase.flip_lr',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.affine_coord': ( 'vision.augment.html#tensormask.affine_coord',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.dihedral_batch': ( 'vision.augment.html#tensormask.dihedral_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.flip_batch': ( 'vision.augment.html#tensormask.flip_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.rotate': ( 'vision.augment.html#tensormask.rotate',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.warp': ( 'vision.augment.html#tensormask.warp',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorMask.zoom': ( 'vision.augment.html#tensormask.zoom',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint._do_crop_pad': ( 'vision.augment.html#tensorpoint._do_crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.affine_coord': ( 'vision.augment.html#tensorpoint.affine_coord',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.crop_pad': ( 'vision.augment.html#tensorpoint.crop_pad',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.dihedral': ( 'vision.augment.html#tensorpoint.dihedral',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.dihedral_batch': ( 'vision.augment.html#tensorpoint.dihedral_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.flip_batch': ( 'vision.augment.html#tensorpoint.flip_batch',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.flip_lr': ( 'vision.augment.html#tensorpoint.flip_lr',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.rotate': ( 'vision.augment.html#tensorpoint.rotate',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.warp': ( 'vision.augment.html#tensorpoint.warp',
'fastai/vision/augment.py'),
'fastai.vision.augment.TensorPoint.zoom': ( 'vision.augment.html#tensorpoint.zoom',
'fastai/vision/augment.py'),
'fastai.vision.augment.Warp': ('vision.augment.html#warp', 'fastai/vision/augment.py'),
'fastai.vision.augment.Warp.__init__': ( 'vision.augment.html#warp.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment.Zoom': ('vision.augment.html#zoom', 'fastai/vision/augment.py'),
'fastai.vision.augment.Zoom.__init__': ( 'vision.augment.html#zoom.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._BrightnessLogit': ( 'vision.augment.html#_brightnesslogit',
'fastai/vision/augment.py'),
'fastai.vision.augment._BrightnessLogit.__call__': ( 'vision.augment.html#_brightnesslogit.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment._BrightnessLogit.__init__': ( 'vision.augment.html#_brightnesslogit.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._BrightnessLogit._def_draw': ( 'vision.augment.html#_brightnesslogit._def_draw',
'fastai/vision/augment.py'),
'fastai.vision.augment._BrightnessLogit.before_call': ( 'vision.augment.html#_brightnesslogit.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment._ContrastLogit': ( 'vision.augment.html#_contrastlogit',
'fastai/vision/augment.py'),
'fastai.vision.augment._ContrastLogit.__call__': ( 'vision.augment.html#_contrastlogit.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment._ContrastLogit.__init__': ( 'vision.augment.html#_contrastlogit.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._ContrastLogit._def_draw': ( 'vision.augment.html#_contrastlogit._def_draw',
'fastai/vision/augment.py'),
'fastai.vision.augment._ContrastLogit.before_call': ( 'vision.augment.html#_contrastlogit.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment._Hue': ('vision.augment.html#_hue', 'fastai/vision/augment.py'),
'fastai.vision.augment._Hue.__call__': ( 'vision.augment.html#_hue.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment._Hue.__init__': ( 'vision.augment.html#_hue.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._Hue._def_draw': ( 'vision.augment.html#_hue._def_draw',
'fastai/vision/augment.py'),
'fastai.vision.augment._Hue.before_call': ( 'vision.augment.html#_hue.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment._SaturationLogit': ( 'vision.augment.html#_saturationlogit',
'fastai/vision/augment.py'),
'fastai.vision.augment._SaturationLogit.__call__': ( 'vision.augment.html#_saturationlogit.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment._SaturationLogit.__init__': ( 'vision.augment.html#_saturationlogit.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._SaturationLogit._def_draw': ( 'vision.augment.html#_saturationlogit._def_draw',
'fastai/vision/augment.py'),
'fastai.vision.augment._SaturationLogit.before_call': ( 'vision.augment.html#_saturationlogit.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment._WarpCoord': ('vision.augment.html#_warpcoord', 'fastai/vision/augment.py'),
'fastai.vision.augment._WarpCoord.__call__': ( 'vision.augment.html#_warpcoord.__call__',
'fastai/vision/augment.py'),
'fastai.vision.augment._WarpCoord.__init__': ( 'vision.augment.html#_warpcoord.__init__',
'fastai/vision/augment.py'),
'fastai.vision.augment._WarpCoord._def_draw': ( 'vision.augment.html#_warpcoord._def_draw',
'fastai/vision/augment.py'),
'fastai.vision.augment._WarpCoord.before_call': ( 'vision.augment.html#_warpcoord.before_call',
'fastai/vision/augment.py'),
'fastai.vision.augment._compose_same_tfms': ( 'vision.augment.html#_compose_same_tfms',
'fastai/vision/augment.py'),
'fastai.vision.augment._draw_mask': ('vision.augment.html#_draw_mask', 'fastai/vision/augment.py'),
'fastai.vision.augment._get_default': ( 'vision.augment.html#_get_default',
'fastai/vision/augment.py'),
'fastai.vision.augment._get_sz': ('vision.augment.html#_get_sz', 'fastai/vision/augment.py'),
'fastai.vision.augment._grid_sample': ( 'vision.augment.html#_grid_sample',
'fastai/vision/augment.py'),
'fastai.vision.augment._init_mat': ('vision.augment.html#_init_mat', 'fastai/vision/augment.py'),
'fastai.vision.augment._linalg_solve': ( 'vision.augment.html#_linalg_solve',
'fastai/vision/augment.py'),
'fastai.vision.augment._neg_axis': ('vision.augment.html#_neg_axis', 'fastai/vision/augment.py'),
'fastai.vision.augment._prepare_mat': ( 'vision.augment.html#_prepare_mat',
'fastai/vision/augment.py'),
'fastai.vision.augment._process_sz': ('vision.augment.html#_process_sz', 'fastai/vision/augment.py'),
'fastai.vision.augment._slice': ('vision.augment.html#_slice', 'fastai/vision/augment.py'),
'fastai.vision.augment._solve': ('vision.augment.html#_solve', 'fastai/vision/augment.py'),
'fastai.vision.augment.affine_grid': ('vision.augment.html#affine_grid', 'fastai/vision/augment.py'),
'fastai.vision.augment.affine_mat': ('vision.augment.html#affine_mat', 'fastai/vision/augment.py'),
'fastai.vision.augment.apply_perspective': ( 'vision.augment.html#apply_perspective',
'fastai/vision/augment.py'),
'fastai.vision.augment.aug_transforms': ( 'vision.augment.html#aug_transforms',
'fastai/vision/augment.py'),
'fastai.vision.augment.cutout_gaussian': ( 'vision.augment.html#cutout_gaussian',
'fastai/vision/augment.py'),
'fastai.vision.augment.dihedral_mat': ( 'vision.augment.html#dihedral_mat',
'fastai/vision/augment.py'),
'fastai.vision.augment.find_coeffs': ('vision.augment.html#find_coeffs', 'fastai/vision/augment.py'),
'fastai.vision.augment.flip_mat': ('vision.augment.html#flip_mat', 'fastai/vision/augment.py'),
'fastai.vision.augment.grayscale': ('vision.augment.html#grayscale', 'fastai/vision/augment.py'),
'fastai.vision.augment.hsv2rgb': ('vision.augment.html#hsv2rgb', 'fastai/vision/augment.py'),
'fastai.vision.augment.mask_tensor': ('vision.augment.html#mask_tensor', 'fastai/vision/augment.py'),
'fastai.vision.augment.norm_apply_denorm': ( 'vision.augment.html#norm_apply_denorm',
'fastai/vision/augment.py'),
'fastai.vision.augment.rgb2hsv': ('vision.augment.html#rgb2hsv', 'fastai/vision/augment.py'),
'fastai.vision.augment.rotate_mat': ('vision.augment.html#rotate_mat', 'fastai/vision/augment.py'),
'fastai.vision.augment.setup_aug_tfms': ( 'vision.augment.html#setup_aug_tfms',
'fastai/vision/augment.py'),
'fastai.vision.augment.zoom_mat': ('vision.augment.html#zoom_mat', 'fastai/vision/augment.py')},
'fastai.vision.core': { 'fastai.vision.core.AddMaskCodes': ('vision.core.html#addmaskcodes', 'fastai/vision/core.py'),
'fastai.vision.core.AddMaskCodes.__init__': ( 'vision.core.html#addmaskcodes.__init__',
'fastai/vision/core.py'),
'fastai.vision.core.AddMaskCodes.decodes': ( 'vision.core.html#addmaskcodes.decodes',
'fastai/vision/core.py'),
'fastai.vision.core.BBoxLabeler': ('vision.core.html#bboxlabeler', 'fastai/vision/core.py'),
'fastai.vision.core.BBoxLabeler.decode': ( 'vision.core.html#bboxlabeler.decode',
'fastai/vision/core.py'),
'fastai.vision.core.BBoxLabeler.decodes': ( 'vision.core.html#bboxlabeler.decodes',
'fastai/vision/core.py'),
'fastai.vision.core.BBoxLabeler.setups': ( 'vision.core.html#bboxlabeler.setups',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.__repr__': ( 'vision.core.html#image.image.__repr__',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.aspect': ( 'vision.core.html#image.image.aspect',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.n_px': ('vision.core.html#image.image.n_px', 'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.reshape': ( 'vision.core.html#image.image.reshape',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.resize_max': ( 'vision.core.html#image.image.resize_max',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.shape': ('vision.core.html#image.image.shape', 'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.to_bytes_format': ( 'vision.core.html#image.image.to_bytes_format',
'fastai/vision/core.py'),
'fastai.vision.core.Image.Image.to_thumb': ( 'vision.core.html#image.image.to_thumb',
'fastai/vision/core.py'),
'fastai.vision.core.LabeledBBox': ('vision.core.html#labeledbbox', 'fastai/vision/core.py'),
'fastai.vision.core.LabeledBBox.show': ('vision.core.html#labeledbbox.show', 'fastai/vision/core.py'),
'fastai.vision.core.PILBase': ('vision.core.html#pilbase', 'fastai/vision/core.py'),
'fastai.vision.core.PILBase.__repr__': ('vision.core.html#pilbase.__repr__', 'fastai/vision/core.py'),
'fastai.vision.core.PILBase.create': ('vision.core.html#pilbase.create', 'fastai/vision/core.py'),
'fastai.vision.core.PILBase.show': ('vision.core.html#pilbase.show', 'fastai/vision/core.py'),
'fastai.vision.core.PILImage': ('vision.core.html#pilimage', 'fastai/vision/core.py'),
'fastai.vision.core.PILImageBW': ('vision.core.html#pilimagebw', 'fastai/vision/core.py'),
'fastai.vision.core.PILMask': ('vision.core.html#pilmask', 'fastai/vision/core.py'),
'fastai.vision.core.PointScaler': ('vision.core.html#pointscaler', 'fastai/vision/core.py'),
'fastai.vision.core.PointScaler.__init__': ( 'vision.core.html#pointscaler.__init__',
'fastai/vision/core.py'),
'fastai.vision.core.PointScaler._get_sz': ( 'vision.core.html#pointscaler._get_sz',
'fastai/vision/core.py'),
'fastai.vision.core.PointScaler._grab_sz': ( 'vision.core.html#pointscaler._grab_sz',
'fastai/vision/core.py'),
'fastai.vision.core.PointScaler.decodes': ( 'vision.core.html#pointscaler.decodes',
'fastai/vision/core.py'),
'fastai.vision.core.PointScaler.encodes': ( 'vision.core.html#pointscaler.encodes',
'fastai/vision/core.py'),
'fastai.vision.core.PointScaler.setups': ( 'vision.core.html#pointscaler.setups',
'fastai/vision/core.py'),
'fastai.vision.core.TensorBBox': ('vision.core.html#tensorbbox', 'fastai/vision/core.py'),
'fastai.vision.core.TensorBBox.create': ('vision.core.html#tensorbbox.create', 'fastai/vision/core.py'),
'fastai.vision.core.TensorBBox.show': ('vision.core.html#tensorbbox.show', 'fastai/vision/core.py'),
'fastai.vision.core.TensorPoint': ('vision.core.html#tensorpoint', 'fastai/vision/core.py'),
'fastai.vision.core.TensorPoint.create': ( 'vision.core.html#tensorpoint.create',
'fastai/vision/core.py'),
'fastai.vision.core.TensorPoint.show': ('vision.core.html#tensorpoint.show', 'fastai/vision/core.py'),
'fastai.vision.core._draw_outline': ('vision.core.html#_draw_outline', 'fastai/vision/core.py'),
'fastai.vision.core._draw_rect': ('vision.core.html#_draw_rect', 'fastai/vision/core.py'),
'fastai.vision.core._scale_pnts': ('vision.core.html#_scale_pnts', 'fastai/vision/core.py'),
'fastai.vision.core._unscale_pnts': ('vision.core.html#_unscale_pnts', 'fastai/vision/core.py'),
'fastai.vision.core.decodes': ('vision.core.html#decodes', 'fastai/vision/core.py'),
'fastai.vision.core.encodes': ('vision.core.html#encodes', 'fastai/vision/core.py'),
'fastai.vision.core.get_annotations': ('vision.core.html#get_annotations', 'fastai/vision/core.py'),
'fastai.vision.core.image2tensor': ('vision.core.html#image2tensor', 'fastai/vision/core.py'),
'fastai.vision.core.load_image': ('vision.core.html#load_image', 'fastai/vision/core.py'),
'fastai.vision.core.to_image': ('vision.core.html#to_image', 'fastai/vision/core.py')},
'fastai.vision.data': { 'fastai.vision.data.BBoxLblBlock': ('vision.data.html#bboxlblblock', 'fastai/vision/data.py'),
'fastai.vision.data.ImageBlock': ('vision.data.html#imageblock', 'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders': ('vision.data.html#imagedataloaders', 'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_csv': ( 'vision.data.html#imagedataloaders.from_csv',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_df': ( 'vision.data.html#imagedataloaders.from_df',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_folder': ( 'vision.data.html#imagedataloaders.from_folder',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_lists': ( 'vision.data.html#imagedataloaders.from_lists',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_name_func': ( 'vision.data.html#imagedataloaders.from_name_func',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_name_re': ( 'vision.data.html#imagedataloaders.from_name_re',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_path_func': ( 'vision.data.html#imagedataloaders.from_path_func',
'fastai/vision/data.py'),
'fastai.vision.data.ImageDataLoaders.from_path_re': ( 'vision.data.html#imagedataloaders.from_path_re',
'fastai/vision/data.py'),
'fastai.vision.data.MaskBlock': ('vision.data.html#maskblock', 'fastai/vision/data.py'),
'fastai.vision.data.SegmentationDataLoaders': ( 'vision.data.html#segmentationdataloaders',
'fastai/vision/data.py'),
'fastai.vision.data.SegmentationDataLoaders.from_label_func': ( 'vision.data.html#segmentationdataloaders.from_label_func',
'fastai/vision/data.py'),
'fastai.vision.data.bb_pad': ('vision.data.html#bb_pad', 'fastai/vision/data.py'),
'fastai.vision.data.clip_remove_empty': ('vision.data.html#clip_remove_empty', 'fastai/vision/data.py'),
'fastai.vision.data.get_grid': ('vision.data.html#get_grid', 'fastai/vision/data.py'),
'fastai.vision.data.show_batch': ('vision.data.html#show_batch', 'fastai/vision/data.py')},
'fastai.vision.gan': { 'fastai.vision.gan.AdaptiveGANSwitcher': ('vision.gan.html#adaptiveganswitcher', 'fastai/vision/gan.py'),
'fastai.vision.gan.AdaptiveGANSwitcher.__init__': ( 'vision.gan.html#adaptiveganswitcher.__init__',
'fastai/vision/gan.py'),
'fastai.vision.gan.AdaptiveGANSwitcher.after_batch': ( 'vision.gan.html#adaptiveganswitcher.after_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.AdaptiveLoss': ('vision.gan.html#adaptiveloss', 'fastai/vision/gan.py'),
'fastai.vision.gan.AdaptiveLoss.__init__': ( 'vision.gan.html#adaptiveloss.__init__',
'fastai/vision/gan.py'),
'fastai.vision.gan.AdaptiveLoss.forward': ( 'vision.gan.html#adaptiveloss.forward',
'fastai/vision/gan.py'),
'fastai.vision.gan.AddChannels': ('vision.gan.html#addchannels', 'fastai/vision/gan.py'),
'fastai.vision.gan.AddChannels.__init__': ( 'vision.gan.html#addchannels.__init__',
'fastai/vision/gan.py'),
'fastai.vision.gan.AddChannels.forward': ('vision.gan.html#addchannels.forward', 'fastai/vision/gan.py'),
'fastai.vision.gan.DenseResBlock': ('vision.gan.html#denseresblock', 'fastai/vision/gan.py'),
'fastai.vision.gan.FixedGANSwitcher': ('vision.gan.html#fixedganswitcher', 'fastai/vision/gan.py'),
'fastai.vision.gan.FixedGANSwitcher.__init__': ( 'vision.gan.html#fixedganswitcher.__init__',
'fastai/vision/gan.py'),
'fastai.vision.gan.FixedGANSwitcher.after_batch': ( 'vision.gan.html#fixedganswitcher.after_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.FixedGANSwitcher.before_train': ( 'vision.gan.html#fixedganswitcher.before_train',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANDiscriminativeLR': ('vision.gan.html#gandiscriminativelr', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANDiscriminativeLR.__init__': ( 'vision.gan.html#gandiscriminativelr.__init__',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANDiscriminativeLR.after_batch': ( 'vision.gan.html#gandiscriminativelr.after_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANDiscriminativeLR.before_batch': ( 'vision.gan.html#gandiscriminativelr.before_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANLearner': ('vision.gan.html#ganlearner', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLearner.__init__': ('vision.gan.html#ganlearner.__init__', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLearner.from_learners': ( 'vision.gan.html#ganlearner.from_learners',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANLearner.wgan': ('vision.gan.html#ganlearner.wgan', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLoss': ('vision.gan.html#ganloss', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLoss.__init__': ('vision.gan.html#ganloss.__init__', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLoss.critic': ('vision.gan.html#ganloss.critic', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANLoss.generator': ('vision.gan.html#ganloss.generator', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANModule': ('vision.gan.html#ganmodule', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANModule.__init__': ('vision.gan.html#ganmodule.__init__', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANModule.forward': ('vision.gan.html#ganmodule.forward', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANModule.switch': ('vision.gan.html#ganmodule.switch', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer': ('vision.gan.html#gantrainer', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.__init__': ('vision.gan.html#gantrainer.__init__', 'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer._set_trainable': ( 'vision.gan.html#gantrainer._set_trainable',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.after_batch': ( 'vision.gan.html#gantrainer.after_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.before_batch': ( 'vision.gan.html#gantrainer.before_batch',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.before_epoch': ( 'vision.gan.html#gantrainer.before_epoch',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.before_fit': ( 'vision.gan.html#gantrainer.before_fit',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.before_validate': ( 'vision.gan.html#gantrainer.before_validate',
'fastai/vision/gan.py'),
'fastai.vision.gan.GANTrainer.switch': ('vision.gan.html#gantrainer.switch', 'fastai/vision/gan.py'),
'fastai.vision.gan.InvisibleTensor': ('vision.gan.html#invisibletensor', 'fastai/vision/gan.py'),
'fastai.vision.gan.InvisibleTensor.show': ( 'vision.gan.html#invisibletensor.show',
'fastai/vision/gan.py'),
'fastai.vision.gan._conv': ('vision.gan.html#_conv', 'fastai/vision/gan.py'),
'fastai.vision.gan._tk_diff': ('vision.gan.html#_tk_diff', 'fastai/vision/gan.py'),
'fastai.vision.gan._tk_mean': ('vision.gan.html#_tk_mean', 'fastai/vision/gan.py'),
'fastai.vision.gan.accuracy_thresh_expand': ( 'vision.gan.html#accuracy_thresh_expand',
'fastai/vision/gan.py'),
'fastai.vision.gan.basic_critic': ('vision.gan.html#basic_critic', 'fastai/vision/gan.py'),
'fastai.vision.gan.basic_generator': ('vision.gan.html#basic_generator', 'fastai/vision/gan.py'),
'fastai.vision.gan.gan_critic': ('vision.gan.html#gan_critic', 'fastai/vision/gan.py'),
'fastai.vision.gan.gan_loss_from_func': ('vision.gan.html#gan_loss_from_func', 'fastai/vision/gan.py'),
'fastai.vision.gan.generate_noise': ('vision.gan.html#generate_noise', 'fastai/vision/gan.py'),
'fastai.vision.gan.set_freeze_model': ('vision.gan.html#set_freeze_model', 'fastai/vision/gan.py'),
'fastai.vision.gan.show_batch': ('vision.gan.html#show_batch', 'fastai/vision/gan.py'),
'fastai.vision.gan.show_results': ('vision.gan.html#show_results', 'fastai/vision/gan.py')},
'fastai.vision.learner': { 'fastai.vision.learner.TimmBody': ('vision.learner.html#timmbody', 'fastai/vision/learner.py'),
'fastai.vision.learner.TimmBody.__init__': ( 'vision.learner.html#timmbody.__init__',
'fastai/vision/learner.py'),
'fastai.vision.learner.TimmBody.forward': ( 'vision.learner.html#timmbody.forward',
'fastai/vision/learner.py'),
'fastai.vision.learner._add_norm': ('vision.learner.html#_add_norm', 'fastai/vision/learner.py'),
'fastai.vision.learner._alexnet_split': ( 'vision.learner.html#_alexnet_split',
'fastai/vision/learner.py'),
'fastai.vision.learner._densenet_split': ( 'vision.learner.html#_densenet_split',
'fastai/vision/learner.py'),
'fastai.vision.learner._get_first_layer': ( 'vision.learner.html#_get_first_layer',
'fastai/vision/learner.py'),
'fastai.vision.learner._is_pool_type': ( 'vision.learner.html#_is_pool_type',
'fastai/vision/learner.py'),
'fastai.vision.learner._load_pretrained_weights': ( 'vision.learner.html#_load_pretrained_weights',
'fastai/vision/learner.py'),
'fastai.vision.learner._resnet_split': ( 'vision.learner.html#_resnet_split',
'fastai/vision/learner.py'),
'fastai.vision.learner._squeezenet_split': ( 'vision.learner.html#_squeezenet_split',
'fastai/vision/learner.py'),
'fastai.vision.learner._timm_norm': ('vision.learner.html#_timm_norm', 'fastai/vision/learner.py'),
'fastai.vision.learner._update_first_layer': ( 'vision.learner.html#_update_first_layer',
'fastai/vision/learner.py'),
'fastai.vision.learner._vgg_split': ('vision.learner.html#_vgg_split', 'fastai/vision/learner.py'),
'fastai.vision.learner._xresnet_split': ( 'vision.learner.html#_xresnet_split',
'fastai/vision/learner.py'),
'fastai.vision.learner.add_head': ('vision.learner.html#add_head', 'fastai/vision/learner.py'),
'fastai.vision.learner.cnn_learner': ('vision.learner.html#cnn_learner', 'fastai/vision/learner.py'),
'fastai.vision.learner.create_body': ('vision.learner.html#create_body', 'fastai/vision/learner.py'),
'fastai.vision.learner.create_cnn_model': ( 'vision.learner.html#create_cnn_model',
'fastai/vision/learner.py'),
'fastai.vision.learner.create_head': ('vision.learner.html#create_head', 'fastai/vision/learner.py'),
'fastai.vision.learner.create_timm_model': ( 'vision.learner.html#create_timm_model',
'fastai/vision/learner.py'),
'fastai.vision.learner.create_unet_model': ( 'vision.learner.html#create_unet_model',
'fastai/vision/learner.py'),
'fastai.vision.learner.create_vision_model': ( 'vision.learner.html#create_vision_model',
'fastai/vision/learner.py'),
'fastai.vision.learner.cut_model': ('vision.learner.html#cut_model', 'fastai/vision/learner.py'),
'fastai.vision.learner.default_split': ( 'vision.learner.html#default_split',
'fastai/vision/learner.py'),
'fastai.vision.learner.has_pool_type': ( 'vision.learner.html#has_pool_type',
'fastai/vision/learner.py'),
'fastai.vision.learner.plot_top_losses': ( 'vision.learner.html#plot_top_losses',
'fastai/vision/learner.py'),
'fastai.vision.learner.show_results': ( 'vision.learner.html#show_results',
'fastai/vision/learner.py'),
'fastai.vision.learner.unet_learner': ( 'vision.learner.html#unet_learner',
'fastai/vision/learner.py'),
'fastai.vision.learner.vision_learner': ( 'vision.learner.html#vision_learner',
'fastai/vision/learner.py')},
'fastai.vision.models.all': {},
'fastai.vision.models.tvm': {},
'fastai.vision.models.unet': { 'fastai.vision.models.unet.DynamicUnet': ( 'vision.models.unet.html#dynamicunet',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.DynamicUnet.__del__': ( 'vision.models.unet.html#dynamicunet.__del__',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.DynamicUnet.__init__': ( 'vision.models.unet.html#dynamicunet.__init__',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.ResizeToOrig': ( 'vision.models.unet.html#resizetoorig',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.ResizeToOrig.__init__': ( 'vision.models.unet.html#resizetoorig.__init__',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.ResizeToOrig.forward': ( 'vision.models.unet.html#resizetoorig.forward',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.UnetBlock': ( 'vision.models.unet.html#unetblock',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.UnetBlock.__init__': ( 'vision.models.unet.html#unetblock.__init__',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet.UnetBlock.forward': ( 'vision.models.unet.html#unetblock.forward',
'fastai/vision/models/unet.py'),
'fastai.vision.models.unet._get_sz_change_idxs': ( 'vision.models.unet.html#_get_sz_change_idxs',
'fastai/vision/models/unet.py')},
'fastai.vision.models.xresnet': { 'fastai.vision.models.xresnet.XResNet': ( 'vision.models.xresnet.html#xresnet',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.XResNet.__init__': ( 'vision.models.xresnet.html#xresnet.__init__',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.XResNet._make_blocks': ( 'vision.models.xresnet.html#xresnet._make_blocks',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.XResNet._make_layer': ( 'vision.models.xresnet.html#xresnet._make_layer',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet._xresnet': ( 'vision.models.xresnet.html#_xresnet',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.init_cnn': ( 'vision.models.xresnet.html#init_cnn',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet101': ( 'vision.models.xresnet.html#xresnet101',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet152': ( 'vision.models.xresnet.html#xresnet152',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet18': ( 'vision.models.xresnet.html#xresnet18',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet18_deep': ( 'vision.models.xresnet.html#xresnet18_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet18_deeper': ( 'vision.models.xresnet.html#xresnet18_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet34': ( 'vision.models.xresnet.html#xresnet34',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet34_deep': ( 'vision.models.xresnet.html#xresnet34_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet34_deeper': ( 'vision.models.xresnet.html#xresnet34_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet50': ( 'vision.models.xresnet.html#xresnet50',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet50_deep': ( 'vision.models.xresnet.html#xresnet50_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnet50_deeper': ( 'vision.models.xresnet.html#xresnet50_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnext101': ( 'vision.models.xresnet.html#xresnext101',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnext18': ( 'vision.models.xresnet.html#xresnext18',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnext34': ( 'vision.models.xresnet.html#xresnext34',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xresnext50': ( 'vision.models.xresnet.html#xresnext50',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnet101': ( 'vision.models.xresnet.html#xse_resnet101',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnet152': ( 'vision.models.xresnet.html#xse_resnet152',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnet18': ( 'vision.models.xresnet.html#xse_resnet18',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnet34': ( 'vision.models.xresnet.html#xse_resnet34',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnet50': ( 'vision.models.xresnet.html#xse_resnet50',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext101': ( 'vision.models.xresnet.html#xse_resnext101',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext18': ( 'vision.models.xresnet.html#xse_resnext18',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext18_deep': ( 'vision.models.xresnet.html#xse_resnext18_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext18_deeper': ( 'vision.models.xresnet.html#xse_resnext18_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext34': ( 'vision.models.xresnet.html#xse_resnext34',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext34_deep': ( 'vision.models.xresnet.html#xse_resnext34_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext34_deeper': ( 'vision.models.xresnet.html#xse_resnext34_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext50': ( 'vision.models.xresnet.html#xse_resnext50',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext50_deep': ( 'vision.models.xresnet.html#xse_resnext50_deep',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xse_resnext50_deeper': ( 'vision.models.xresnet.html#xse_resnext50_deeper',
'fastai/vision/models/xresnet.py'),
'fastai.vision.models.xresnet.xsenet154': ( 'vision.models.xresnet.html#xsenet154',
'fastai/vision/models/xresnet.py')},
'fastai.vision.utils': { 'fastai.vision.utils._download_image_inner': ( 'vision.utils.html#_download_image_inner',
'fastai/vision/utils.py'),
'fastai.vision.utils._get_downloaded_image_filename': ( 'vision.utils.html#_get_downloaded_image_filename',
'fastai/vision/utils.py'),
'fastai.vision.utils.download_images': ('vision.utils.html#download_images', 'fastai/vision/utils.py'),
'fastai.vision.utils.resize_image': ('vision.utils.html#resize_image', 'fastai/vision/utils.py'),
'fastai.vision.utils.resize_images': ('vision.utils.html#resize_images', 'fastai/vision/utils.py'),
'fastai.vision.utils.resize_to': ('vision.utils.html#resize_to', 'fastai/vision/utils.py'),
'fastai.vision.utils.verify_image': ('vision.utils.html#verify_image', 'fastai/vision/utils.py'),
'fastai.vision.utils.verify_images': ('vision.utils.html#verify_images', 'fastai/vision/utils.py')},
'fastai.vision.widgets': { 'fastai.vision.widgets.Box.__getitem__': ( 'vision.widgets.html#box.__getitem__',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImageClassifierCleaner': ( 'vision.widgets.html#imageclassifiercleaner',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImageClassifierCleaner.__init__': ( 'vision.widgets.html#imageclassifiercleaner.__init__',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImageClassifierCleaner._ipython_display_': ( 'vision.widgets.html#imageclassifiercleaner._ipython_display_',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImageClassifierCleaner.on_change_ds': ( 'vision.widgets.html#imageclassifiercleaner.on_change_ds',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner': ( 'vision.widgets.html#imagescleaner',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner.__init__': ( 'vision.widgets.html#imagescleaner.__init__',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner._ipython_display_': ( 'vision.widgets.html#imagescleaner._ipython_display_',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner.change': ( 'vision.widgets.html#imagescleaner.change',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner.delete': ( 'vision.widgets.html#imagescleaner.delete',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner.set_fns': ( 'vision.widgets.html#imagescleaner.set_fns',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.ImagesCleaner.values': ( 'vision.widgets.html#imagescleaner.values',
'fastai/vision/widgets.py'),
'fastai.vision.widgets._get_iw_info': ( 'vision.widgets.html#_get_iw_info',
'fastai/vision/widgets.py'),
'fastai.vision.widgets._open_thumb': ('vision.widgets.html#_open_thumb', 'fastai/vision/widgets.py'),
'fastai.vision.widgets._update_children': ( 'vision.widgets.html#_update_children',
'fastai/vision/widgets.py'),
'fastai.vision.widgets.carousel': ('vision.widgets.html#carousel', 'fastai/vision/widgets.py'),
'fastai.vision.widgets.widget': ('vision.widgets.html#widget', 'fastai/vision/widgets.py')}}}
| 338,820 | 124.118538 | 196 | py |
fastai | fastai-master/fastai/collab.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/45_collab.ipynb.
# %% ../nbs/45_collab.ipynb 2
from __future__ import annotations
from .tabular.all import *
# %% auto 0
__all__ = ['TabularCollab', 'CollabDataLoaders', 'EmbeddingDotBias', 'EmbeddingNN', 'collab_learner']
# %% ../nbs/45_collab.ipynb 7
class TabularCollab(TabularPandas):
    "Instance of `TabularPandas` suitable for collaborative filtering (with no continuous variable)"
    with_cont=False  # collab data has only categorical id columns (user/item), no continuous features
# %% ../nbs/45_collab.ipynb 9
class CollabDataLoaders(DataLoaders):
    "Base `DataLoaders` for collaborative filtering."
    @delegates(DataLoaders.from_dblock)
    @classmethod
    def from_df(cls, ratings, valid_pct=0.2, user_name=None, item_name=None, rating_name=None, seed=None, path='.', **kwargs):
        "Create a `DataLoaders` suitable for collaborative filtering from `ratings`."
        # Columns default to positional order: user, item, rating.
        user_name = ifnone(user_name, ratings.columns[0])
        item_name = ifnone(item_name, ratings.columns[1])
        rating_name = ifnone(rating_name, ratings.columns[2])
        cat_names = [user_name,item_name]
        # Random train/valid split over the row indices of `ratings`.
        splits = RandomSplitter(valid_pct=valid_pct, seed=seed)(range_of(ratings))
        to = TabularCollab(ratings, [Categorify], cat_names, y_names=[rating_name], y_block=TransformBlock(), splits=splits)
        return to.dataloaders(path=path, **kwargs)
    @classmethod
    def from_csv(cls, csv, **kwargs):
        "Create a `DataLoaders` suitable for collaborative filtering from `csv`."
        return cls.from_df(pd.read_csv(csv), **kwargs)
# Re-apply `delegates` here so `from_csv` advertises `from_df`'s keyword arguments
# (not possible inside the class body, where `from_df` is not yet fully defined).
CollabDataLoaders.from_csv = delegates(to=CollabDataLoaders.from_df)(CollabDataLoaders.from_csv)
# %% ../nbs/45_collab.ipynb 19
class EmbeddingDotBias(Module):
    "Base dot model for collaborative filtering."
    def __init__(self, n_factors, n_users, n_items, y_range=None):
        self.y_range = y_range
        # Four embedding tables: user/item latent factors plus scalar user/item biases.
        (self.u_weight, self.i_weight, self.u_bias, self.i_bias) = [Embedding(*o) for o in [
            (n_users, n_factors), (n_items, n_factors), (n_users,1), (n_items,1)
        ]]
    def forward(self, x):
        # `x` holds (user index, item index) pairs; column 0 is the user, column 1 the item.
        users,items = x[:,0],x[:,1]
        dot = self.u_weight(users)* self.i_weight(items)
        res = dot.sum(1) + self.u_bias(users).squeeze() + self.i_bias(items).squeeze()
        if self.y_range is None: return res
        # Squash predictions into the requested (low, high) rating range.
        return torch.sigmoid(res) * (self.y_range[1]-self.y_range[0]) + self.y_range[0]
    @classmethod
    def from_classes(cls, n_factors, classes, user=None, item=None, y_range=None):
        "Build a model with `n_factors` by inferring `n_users` and `n_items` from `classes`"
        # Defaults: first key of `classes` is the user vocab, second is the item vocab.
        if user is None: user = list(classes.keys())[0]
        if item is None: item = list(classes.keys())[1]
        res = cls(n_factors, len(classes[user]), len(classes[item]), y_range=y_range)
        res.classes,res.user,res.item = classes,user,item
        return res
    def _get_idx(self, arr, is_item=True):
        "Fetch item or user (based on `is_item`) for all in `arr`"
        assert hasattr(self, 'classes'), "Build your model with `EmbeddingDotBias.from_classes` to use this functionality."
        classes = self.classes[self.item] if is_item else self.classes[self.user]
        c2i = {v:k for k,v in enumerate(classes)}
        try: return tensor([c2i[o] for o in arr])
        except KeyError as e:
            message = f"You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data. If it was in your original data, it may have been split such that it's only in the validation set now."
            raise modify_exception(e, message, replace=True)
    def bias(self, arr, is_item=True):
        "Bias for item or user (based on `is_item`) for all in `arr`"
        idx = self._get_idx(arr, is_item)
        # Run the embedding on CPU in eval mode and detach from the autograd graph.
        layer = (self.i_bias if is_item else self.u_bias).eval().cpu()
        return to_detach(layer(idx).squeeze(),gather=False)
    def weight(self, arr, is_item=True):
        "Weight for item or user (based on `is_item`) for all in `arr`"
        idx = self._get_idx(arr, is_item)
        layer = (self.i_weight if is_item else self.u_weight).eval().cpu()
        return to_detach(layer(idx),gather=False)
# %% ../nbs/45_collab.ipynb 34
class EmbeddingNN(TabularModel):
    "Subclass `TabularModel` to create a NN suitable for collaborative filtering."
    @delegates(TabularModel.__init__)
    def __init__(self, emb_szs, layers, **kwargs):
        # No continuous features and a single output (the predicted rating).
        super().__init__(emb_szs=emb_szs, n_cont=0, out_sz=1, layers=layers, **kwargs)
# %% ../nbs/45_collab.ipynb 40
@delegates(Learner.__init__)
def collab_learner(dls, n_factors=50, use_nn=False, emb_szs=None, layers=None, config=None, y_range=None, loss_func=None, **kwargs):
    "Create a Learner for collaborative filtering on `dls`."
    # n_factors: latent-factor size (also the default hidden size for the NN model)
    # use_nn:    use an `EmbeddingNN` (MLP) instead of the dot-product model
    # y_range:   optional (low, high) range to squash predictions into
    emb_szs = get_emb_sz(dls, ifnone(emb_szs, {}))
    if loss_func is None: loss_func = MSELossFlat()
    # Copy any caller-supplied config so we never mutate the caller's dict in place.
    config = tabular_config() if config is None else dict(config)
    if y_range is not None: config['y_range'] = y_range
    if layers is None: layers = [n_factors]
    if use_nn: model = EmbeddingNN(emb_szs=emb_szs, layers=layers, **config)
    else: model = EmbeddingDotBias.from_classes(n_factors, dls.classes, y_range=y_range)
    return Learner(dls, model, loss_func=loss_func, **kwargs)
| 5,199 | 49.485437 | 221 | py |
fastai | fastai-master/fastai/interpret.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/20_interpret.ipynb.
# %% ../nbs/20_interpret.ipynb 2
from __future__ import annotations
from .data.all import *
from .optimizer import *
from .learner import *
from .tabular.core import *
import sklearn.metrics as skm
# %% auto 0
__all__ = ['plot_top_losses', 'Interpretation', 'ClassificationInterpretation', 'SegmentationInterpretation']
# %% ../nbs/20_interpret.ipynb 7
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
# %% ../nbs/20_interpret.ipynb 8
_all_ = ["plot_top_losses"]
# %% ../nbs/20_interpret.ipynb 9
class Interpretation():
    "Interpretation base class, can be inherited for task specific Interpretation classes"
    def __init__(self,
        learn:Learner,
        dl:DataLoader, # `DataLoader` to run inference over
        losses:TensorBase, # Losses calculated from `dl`
        act=None # Activation function for prediction
    ):
        store_attr()
    def __getitem__(self, idxs):
        "Return inputs, preds, targs, decoded outputs, and losses at `idxs`"
        if isinstance(idxs, Tensor): idxs = idxs.tolist()
        if not is_listy(idxs): idxs = [idxs]
        # `items` may be a DataFrame (use .iloc) or a plain collection (wrap in L).
        items = getattr(self.dl.items, 'iloc', L(self.dl.items))[idxs]
        # Tabular items were already processed, so skip re-processing for a TabDataLoader.
        tmp_dl = self.learn.dls.test_dl(items, with_labels=True, process=not isinstance(self.dl, TabDataLoader))
        inps,preds,targs,decoded = self.learn.get_preds(dl=tmp_dl, with_input=True, with_loss=False,
                                                        with_decoded=True, act=self.act, reorder=False)
        return inps, preds, targs, decoded, self.losses[idxs]
    @classmethod
    def from_learner(cls,
        learn, # Model used to create interpretation
        ds_idx:int=1, # Index of `learn.dls` when `dl` is None
        dl:DataLoader=None, # `Dataloader` used to make predictions
        act=None # Override default or set prediction activation function
    ):
        "Construct interpretation object from a learner"
        if dl is None: dl = learn.dls[ds_idx].new(shuffle=False, drop_last=False)
        # Only per-item losses are kept up front; predictions are recomputed lazily in __getitem__.
        _,_,losses = learn.get_preds(dl=dl, with_input=False, with_loss=True, with_decoded=False,
                                     with_preds=False, with_targs=False, act=act)
        return cls(learn, dl, losses, act)
    def top_losses(self,
        k:int|None=None, # Return `k` losses, defaults to all
        largest:bool=True, # Sort losses by largest or smallest
        items:bool=False # Whether to return input items
    ):
        "`k` largest(/smallest) losses and indexes, defaulting to all losses."
        losses, idx = self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
        if items: return losses, idx, getattr(self.dl.items, 'iloc', L(self.dl.items))[idx]
        else: return losses, idx
    def plot_top_losses(self,
        k:int|MutableSequence, # Number of losses to plot
        largest:bool=True, # Sort losses by largest or smallest
        **kwargs
    ):
        "Show `k` largest(/smallest) preds and losses. Implementation based on type dispatch"
        # A list/range `k` selects specific ranks; an int selects the top-k.
        if is_listy(k) or isinstance(k, range):
            losses, idx = (o[k] for o in self.top_losses(None, largest))
        else:
            losses, idx = self.top_losses(k, largest)
        inps, preds, targs, decoded, _ = self[idx]
        inps, targs, decoded = tuplify(inps), tuplify(targs), tuplify(decoded)
        # Decode twice: once with the true targets, once with the model's decoded predictions.
        x, y, its = self.dl._pre_show_batch(inps+targs, max_n=len(idx))
        x1, y1, outs = self.dl._pre_show_batch(inps+decoded, max_n=len(idx))
        if its is not None:
            plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), preds, losses, **kwargs)
        #TODO: figure out if this is needed
        #its None means that a batch knows how to show itself as a whole, so we pass x, x1
        #else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
    def show_results(self,
        idxs:list, # Indices of predictions and targets
        **kwargs
    ):
        "Show predictions and targets of `idxs`"
        if isinstance(idxs, Tensor): idxs = idxs.tolist()
        if not is_listy(idxs): idxs = [idxs]
        inps, _, targs, decoded, _ = self[idxs]
        b = tuplify(inps)+tuplify(targs)
        self.dl.show_results(b, tuplify(decoded), max_n=len(idxs), **kwargs)
# %% ../nbs/20_interpret.ipynb 22
class ClassificationInterpretation(Interpretation):
    "Interpretation methods for classification models."
    def __init__(self,
        learn:Learner,
        dl:DataLoader, # `DataLoader` to run inference over
        losses:TensorBase, # Losses calculated from `dl`
        act=None # Activation function for prediction
    ):
        super().__init__(learn, dl, losses, act)
        # For multi-part vocabs keep the last part (the class labels).
        self.vocab = self.dl.vocab
        if is_listy(self.vocab): self.vocab = self.vocab[-1]
    def confusion_matrix(self):
        "Confusion matrix as an `np.ndarray`."
        x = torch.arange(0, len(self.vocab))
        _,targs,decoded = self.learn.get_preds(dl=self.dl, with_decoded=True, with_preds=True,
                                               with_targs=True, act=self.act)
        d,t = flatten_check(decoded, targs)
        # Broadcasted equality counts (actual, predicted) co-occurrences per class pair.
        cm = ((d==x[:,None]) & (t==x[:,None,None])).long().sum(2)
        return to_np(cm)
    def plot_confusion_matrix(self,
        normalize:bool=False, # Whether to normalize occurrences
        title:str='Confusion matrix', # Title of plot
        cmap:str="Blues", # Colormap from matplotlib
        norm_dec:int=2, # Decimal places for normalized occurrences
        plot_txt:bool=True, # Display occurrence in matrix
        **kwargs
    ):
        "Plot the confusion matrix, with `title` and using `cmap`."
        # This function is mainly copied from the sklearn docs
        cm = self.confusion_matrix()
        if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        fig = plt.figure(**kwargs)
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        tick_marks = np.arange(len(self.vocab))
        plt.xticks(tick_marks, self.vocab, rotation=90)
        plt.yticks(tick_marks, self.vocab, rotation=0)
        if plot_txt:
            # Flip text color to black on light cells for readability.
            thresh = cm.max() / 2.
            for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
                coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
                plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white"
                         if cm[i, j] > thresh else "black")
        ax = fig.gca()
        ax.set_ylim(len(self.vocab)-.5,-.5)
        plt.tight_layout()
        plt.ylabel('Actual')
        plt.xlabel('Predicted')
        plt.grid(False)
    def most_confused(self, min_val=1):
        "Sorted descending largest non-diagonal entries of confusion matrix (actual, predicted, # occurrences"
        cm = self.confusion_matrix()
        np.fill_diagonal(cm, 0)  # ignore correct predictions
        res = [(self.vocab[i],self.vocab[j],cm[i,j]) for i,j in zip(*np.where(cm>=min_val))]
        return sorted(res, key=itemgetter(2), reverse=True)
    def print_classification_report(self):
        "Print scikit-learn classification report"
        _,targs,decoded = self.learn.get_preds(dl=self.dl, with_decoded=True, with_preds=True,
                                               with_targs=True, act=self.act)
        d,t = flatten_check(decoded, targs)
        names = [str(v) for v in self.vocab]
        print(skm.classification_report(t, d, labels=list(self.vocab.o2i.values()), target_names=names))
# %% ../nbs/20_interpret.ipynb 27
class SegmentationInterpretation(Interpretation):
    "Interpretation methods for segmentation models."
    # Currently inherits all behavior from `Interpretation` unchanged.
    pass
| 7,769 | 43.913295 | 112 | py |
fastai | fastai-master/fastai/layers.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01_layers.ipynb.
# %% ../nbs/01_layers.ipynb 2
from __future__ import annotations
from .imports import *
from .torch_imports import *
from .torch_core import *
from torch.nn.utils import weight_norm, spectral_norm
# %% auto 0
__all__ = ['NormType', 'inplace_relu', 'module', 'Identity', 'Lambda', 'PartialLambda', 'Flatten', 'ToTensorBase', 'View',
'ResizeBatch', 'Debugger', 'sigmoid_range', 'SigmoidRange', 'AdaptiveConcatPool1d', 'AdaptiveConcatPool2d',
'PoolType', 'adaptive_pool', 'PoolFlatten', 'BatchNorm', 'InstanceNorm', 'BatchNorm1dFlat', 'LinBnDrop',
'sigmoid', 'sigmoid_', 'vleaky_relu', 'init_default', 'init_linear', 'ConvLayer', 'AdaptiveAvgPool',
'MaxPool', 'AvgPool', 'trunc_normal_', 'Embedding', 'SelfAttention', 'PooledSelfAttention2d',
'SimpleSelfAttention', 'icnr_init', 'PixelShuffle_ICNR', 'sequential', 'SequentialEx', 'MergeLayer', 'Cat',
'SimpleCNN', 'ProdLayer', 'SEModule', 'ResBlock', 'SEBlock', 'SEResNeXtBlock', 'SeparableBlock',
'TimeDistributed', 'swish', 'Swish', 'MishJitAutoFn', 'mish', 'Mish', 'ParameterModule',
'children_and_parameters', 'has_children', 'flatten_model', 'NoneReduce', 'in_channels']
# %% ../nbs/01_layers.ipynb 6
def module(*flds, **defaults):
    "Decorator to create an `nn.Module` using `f` as `forward` method"
    # Positional fields then keyword fields (with defaults) form the generated __init__ signature.
    pa = [inspect.Parameter(o, inspect.Parameter.POSITIONAL_OR_KEYWORD) for o in flds]
    pb = [inspect.Parameter(k, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=v)
          for k,v in defaults.items()]
    params = pa+pb
    all_flds = [*flds,*defaults.keys()]
    def _f(f):
        class c(nn.Module):
            def __init__(self, *args, **kwargs):
                super().__init__()
                # Map positional args onto field names, merge with defaults, store as attributes.
                for i,o in enumerate(args): kwargs[all_flds[i]] = o
                kwargs = merge(defaults,kwargs)
                for k,v in kwargs.items(): setattr(self,k,v)
            __repr__ = basic_repr(all_flds)
            forward = f
        # Make the generated class look like the decorated function (name, doc, signature).
        c.__signature__ = inspect.Signature(params)
        c.__name__ = c.__qualname__ = f.__name__
        c.__doc__ = f.__doc__
        return c
    return _f
# %% ../nbs/01_layers.ipynb 7
@module()
def Identity(self, x):
    "Do nothing at all"
    # Pass-through layer: returns its input unchanged.
    return x
# %% ../nbs/01_layers.ipynb 9
@module('func')
def Lambda(self, x):
    "An easy way to create a pytorch layer for a simple `func`"
    # `func` is stored by the `module` decorator and applied on every forward pass.
    return self.func(x)
# %% ../nbs/01_layers.ipynb 11
class PartialLambda(Lambda):
    "Layer that applies `partial(func, **kwargs)`"
    def __init__(self, func, **kwargs):
        super().__init__(partial(func, **kwargs))
        self.repr = f'{func.__name__}, {kwargs}'  # stored for a readable __repr__
    def forward(self, x): return self.func(x)
    def __repr__(self): return f'{self.__class__.__name__}({self.repr})'
# %% ../nbs/01_layers.ipynb 13
@module(full=False)
def Flatten(self, x):
    "Flatten `x` to a single dimension, e.g. at end of a model. `full` for rank-1 tensor"
    # full=True collapses everything to rank 1; otherwise the batch dim is preserved.
    return x.view(-1) if self.full else x.view(x.size(0), -1) # Removed cast to Tensorbase
# %% ../nbs/01_layers.ipynb 15
@module(tensor_cls=TensorBase)
def ToTensorBase(self, x):
    "Convert x to TensorBase class"
    return self.tensor_cls(x)  # `tensor_cls` is configurable at construction time
# %% ../nbs/01_layers.ipynb 17
class View(Module):
    "Reshape `x` to `size`"
    # `size` is the full target shape (including the batch dim); cf. `ResizeBatch`.
    def __init__(self, *size): self.size = size
    def forward(self, x): return x.view(self.size)
# %% ../nbs/01_layers.ipynb 19
class ResizeBatch(Module):
    "Reshape `x` to `size`, keeping batch dim the same size"
    # `size` is the per-sample shape; the batch dim is prepended automatically.
    def __init__(self, *size): self.size = size
    def forward(self, x): return x.view((x.size(0),) + self.size)
# %% ../nbs/01_layers.ipynb 21
@module()
def Debugger(self,x):
    "A module to debug inside a model."
    set_trace()  # drops into the debugger on every forward pass
    return x
# %% ../nbs/01_layers.ipynb 22
def sigmoid_range(x, low, high):
    "Sigmoid function with range `(low, high)`"
    # Squash to (0, 1) with a sigmoid, then rescale/shift into (low, high).
    span = high - low
    return low + span * torch.sigmoid(x)
# %% ../nbs/01_layers.ipynb 24
@module('low','high')
def SigmoidRange(self, x):
    "Sigmoid module with range `(low, high)`"
    # Module wrapper around `sigmoid_range` with the bounds stored as attributes.
    return sigmoid_range(x, self.low, self.high)
# %% ../nbs/01_layers.ipynb 27
class AdaptiveConcatPool1d(Module):
    "Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`"
    def __init__(self, size=None):
        self.size = size or 1  # default pooled output size of 1
        self.ap = nn.AdaptiveAvgPool1d(self.size)
        self.mp = nn.AdaptiveMaxPool1d(self.size)
    # Output has twice the input channel count: max-pool then avg-pool, concatenated on dim 1.
    def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# %% ../nbs/01_layers.ipynb 28
class AdaptiveConcatPool2d(Module):
    "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"
    def __init__(self, size=None):
        self.size = size or 1  # default pooled output size of 1x1
        self.ap = nn.AdaptiveAvgPool2d(self.size)
        self.mp = nn.AdaptiveMaxPool2d(self.size)
    # Output has twice the input channel count: max-pool then avg-pool, concatenated on dim 1.
    def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# %% ../nbs/01_layers.ipynb 31
# String constants naming the pooling flavors understood by `adaptive_pool`.
class PoolType: Avg,Max,Cat = 'Avg','Max','Cat'
# %% ../nbs/01_layers.ipynb 32
def adaptive_pool(pool_type):
    "Return the adaptive 2d pooling class for `pool_type` ('Avg', 'Max', else concat)."
    if pool_type == 'Avg': return nn.AdaptiveAvgPool2d
    if pool_type == 'Max': return nn.AdaptiveMaxPool2d
    return AdaptiveConcatPool2d
# %% ../nbs/01_layers.ipynb 33
class PoolFlatten(nn.Sequential):
    "Combine `nn.AdaptiveAvgPool2d` and `Flatten`."
    # Pools to 1x1, then flattens to (batch, channels).
    def __init__(self, pool_type=PoolType.Avg): super().__init__(adaptive_pool(pool_type)(1), Flatten())
# %% ../nbs/01_layers.ipynb 36
# Normalization flavors; the "Zero" variants zero-initialize the norm weight (see `_get_norm`).
NormType = Enum('NormType', 'Batch BatchZero Weight Spectral Instance InstanceZero')
# %% ../nbs/01_layers.ipynb 37
def _get_norm(prefix, nf, ndim=2, zero=False, **kwargs):
"Norm layer with `nf` features and `ndim` initialized depending on `norm_type`."
assert 1 <= ndim <= 3
bn = getattr(nn, f"{prefix}{ndim}d")(nf, **kwargs)
if bn.affine:
bn.bias.data.fill_(1e-3)
bn.weight.data.fill_(0. if zero else 1.)
return bn
# %% ../nbs/01_layers.ipynb 38
@delegates(nn.BatchNorm2d)
def BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):
    "BatchNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
    # Weight is zero-initialized when `norm_type` is BatchZero (see `_get_norm`).
    return _get_norm('BatchNorm', nf, ndim, zero=norm_type==NormType.BatchZero, **kwargs)
# %% ../nbs/01_layers.ipynb 39
@delegates(nn.InstanceNorm2d)
def InstanceNorm(nf, ndim=2, norm_type=NormType.Instance, affine=True, **kwargs):
    "InstanceNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
    # Weight is zero-initialized when `norm_type` is InstanceZero (see `_get_norm`).
    return _get_norm('InstanceNorm', nf, ndim, zero=norm_type==NormType.InstanceZero, affine=affine, **kwargs)
# %% ../nbs/01_layers.ipynb 45
class BatchNorm1dFlat(nn.BatchNorm1d):
    "`nn.BatchNorm1d`, but first flattens leading dimensions"
    def forward(self, x):
        # Rank-2 input already matches what BatchNorm1d expects.
        if x.dim() == 2: return super().forward(x)
        # Fold all leading dims into one, normalize, then restore the shape.
        shape = x.shape
        flat = x.contiguous().view(-1, shape[-1])
        return super().forward(flat).view(shape)
# %% ../nbs/01_layers.ipynb 47
class LinBnDrop(nn.Sequential):
    "Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers"
    def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
        # BatchNorm size depends on whether it runs before or after the linear layer.
        layers = [BatchNorm(n_out if lin_first else n_in, ndim=1)] if bn else []
        if p != 0: layers.append(nn.Dropout(p))
        lin = [nn.Linear(n_in, n_out, bias=not bn)]  # no bias when a BatchNorm supplies the shift
        if act is not None: lin.append(act)
        layers = lin+layers if lin_first else layers+lin
        super().__init__(*layers)
# %% ../nbs/01_layers.ipynb 51
def sigmoid(input, eps=1e-7):
    "Same as `torch.sigmoid`, plus clamping to `(eps,1-eps)"
    # Clamping keeps downstream logs/divisions away from exact 0 and 1.
    return torch.sigmoid(input).clamp(eps, 1 - eps)
# %% ../nbs/01_layers.ipynb 52
def sigmoid_(input, eps=1e-7):
    "Same as `torch.sigmoid_`, plus clamping to `(eps,1-eps)"
    # In-place variant: mutates and returns `input`.
    input.sigmoid_()
    return input.clamp_(eps, 1 - eps)
# %% ../nbs/01_layers.ipynb 53
from torch.nn.init import kaiming_uniform_,uniform_,xavier_uniform_,normal_
# %% ../nbs/01_layers.ipynb 54
def vleaky_relu(input, inplace=True):
    "`F.leaky_relu` with 0.3 slope"
    # "Very leaky" ReLU: fixed 0.3 negative slope.
    return F.leaky_relu(input, inplace=inplace, negative_slope=0.3)
# %% ../nbs/01_layers.ipynb 55
# ReLU-family activations are paired with Kaiming (He) uniform initialization.
for o in F.relu,nn.ReLU,F.relu6,nn.ReLU6,F.leaky_relu,nn.LeakyReLU:
    o.__default_init__ = kaiming_uniform_
# %% ../nbs/01_layers.ipynb 56
# Saturating activations are paired with Xavier (Glorot) uniform initialization.
for o in F.sigmoid,nn.Sigmoid,F.tanh,nn.Tanh,sigmoid,sigmoid_:
    o.__default_init__ = xavier_uniform_
# %% ../nbs/01_layers.ipynb 57
def init_default(m, func=nn.init.kaiming_normal_):
    "Initialize `m` weights with `func` and set `bias` to 0."
    if func and hasattr(m, 'weight'): func(m.weight)
    # nested_callable resolves `m.bias.fill_` (fastcore helper; presumably a no-op when bias is absent — confirm).
    with torch.no_grad(): nested_callable(m, 'bias.fill_')(0.)
    return m
# %% ../nbs/01_layers.ipynb 58
def init_linear(m, act_func=None, init='auto', bias_std=0.01):
    # Initialize a linear/conv module `m`; when init='auto' the init is chosen from `act_func`.
    if getattr(m,'bias',None) is not None and bias_std is not None:
        # Small random bias unless bias_std is exactly 0 (then zero it).
        if bias_std != 0: normal_(m.bias, 0, bias_std)
        else: m.bias.data.zero_()
    if init=='auto':
        # Use the activation's registered `__default_init__` (set at module import time).
        if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_
        else: init = nested_callable(act_func, '__class__.__default_init__')
        if init == noop: init = getcallable(act_func, '__default_init__')
    if callable(init): init(m.weight)
# %% ../nbs/01_layers.ipynb 60
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <=3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
# %% ../nbs/01_layers.ipynb 62
defaults.activation=nn.ReLU
# %% ../nbs/01_layers.ipynb 63
class ConvLayer(nn.Sequential):
    "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers."
    @delegates(nn.Conv2d)
    def __init__(self, ni, nf, ks=3, stride=1, padding=None, bias=None, ndim=2, norm_type=NormType.Batch, bn_1st=True,
                 act_cls=defaults.activation, transpose=False, init='auto', xtra=None, bias_std=0.01, **kwargs):
        # "Same" padding for standard convs; 0 for transposed convs.
        if padding is None: padding = ((ks-1)//2 if not transpose else 0)
        bn = norm_type in (NormType.Batch, NormType.BatchZero)
        inn = norm_type in (NormType.Instance, NormType.InstanceZero)
        # Conv bias is redundant when a norm layer with an affine shift follows.
        if bias is None: bias = not (bn or inn)
        conv_func = _conv_func(ndim, transpose=transpose)
        conv = conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, **kwargs)
        act = None if act_cls is None else act_cls()
        init_linear(conv, act, init=init, bias_std=bias_std)
        # Weight/spectral norm wrap the conv itself rather than adding a layer.
        if norm_type==NormType.Weight: conv = weight_norm(conv)
        elif norm_type==NormType.Spectral: conv = spectral_norm(conv)
        layers = [conv]
        act_bn = []
        if act is not None: act_bn.append(act)
        if bn: act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))
        if inn: act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))
        if bn_1st: act_bn.reverse()  # norm before activation when requested
        layers += act_bn
        if xtra: layers.append(xtra)
        super().__init__(*layers)
# %% ../nbs/01_layers.ipynb 77
def AdaptiveAvgPool(sz=1, ndim=2):
    "nn.AdaptiveAvgPool layer for `ndim`"
    assert 1 <= ndim <= 3
    pool_cls = getattr(nn, f"AdaptiveAvgPool{ndim}d")
    return pool_cls(sz)
# %% ../nbs/01_layers.ipynb 78
def MaxPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
    "nn.MaxPool layer for `ndim`"
    assert 1 <= ndim <= 3
    # Bug fix: `ceil_mode` was accepted but never forwarded (unlike `AvgPool` below)
    return getattr(nn, f"MaxPool{ndim}d")(ks, stride=stride, padding=padding, ceil_mode=ceil_mode)
# %% ../nbs/01_layers.ipynb 79
def AvgPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
    "nn.AvgPool layer for `ndim`"
    assert 1 <= ndim <= 3
    pool_cls = getattr(nn, f"AvgPool{ndim}d")
    return pool_cls(ks, stride=stride, padding=padding, ceil_mode=ceil_mode)
# %% ../nbs/01_layers.ipynb 81
def trunc_normal_(x, mean=0., std=1.):
    "Truncated normal initialization (approximation)"
    # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
    x.normal_()
    x.fmod_(2)      # fold samples into (-2, 2) — approximate truncation at 2 std
    x.mul_(std)
    return x.add_(mean)
# %% ../nbs/01_layers.ipynb 82
class Embedding(nn.Embedding):
    "Embedding layer with truncated normal initialization"
    def __init__(self, ni, nf, std=0.01):
        super().__init__(ni, nf)
        # Inlined truncated-normal init (mean 0): normal -> fold into (-2,2) -> scale by std
        self.weight.data.normal_().fmod_(2).mul_(std)
# %% ../nbs/01_layers.ipynb 86
class SelfAttention(Module):
    "Self attention layer for `n_channels`."
    def __init__(self, n_channels):
        # Query/key are bottlenecked to n_channels//8 as in the SAGAN paper; value keeps full width
        self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels)]
        # Learned residual weight, starts at 0 so the layer is initially the identity
        self.gamma = nn.Parameter(tensor([0.]))
    def _conv(self,n_in,n_out):
        # 1x1 spectral-normalized conv, no activation/bias
        return ConvLayer(n_in, n_out, ks=1, ndim=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
    def forward(self, x):
        #Notation from the paper.
        size = x.size()
        # Flatten spatial dims: (bs, c, h, w) -> (bs, c, h*w)
        x = x.view(*size[:2],-1)
        f,g,h = self.query(x),self.key(x),self.value(x)
        beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)
        o = self.gamma * torch.bmm(h, beta) + x
        return o.view(*size).contiguous()
# %% ../nbs/01_layers.ipynb 95
class PooledSelfAttention2d(Module):
    "Pooled self attention layer for 2d."
    def __init__(self, n_channels):
        self.n_channels = n_channels
        # Query/key bottlenecked to c//8; value to c//2; `out` projects back to c (BigGAN-style)
        self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels//2)]
        self.out = self._conv(n_channels//2, n_channels)
        # Learned residual weight, starts at 0 so the layer is initially the identity
        self.gamma = nn.Parameter(tensor([0.]))
    def _conv(self,n_in,n_out):
        return ConvLayer(n_in, n_out, ks=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
    def forward(self, x):
        n_ftrs = x.shape[2]*x.shape[3]
        f = self.query(x).view(-1, self.n_channels//8, n_ftrs)
        # Key/value are 2x2 max-pooled first, reducing attention cost by 4x
        g = F.max_pool2d(self.key(x), [2,2]).view(-1, self.n_channels//8, n_ftrs//4)
        h = F.max_pool2d(self.value(x), [2,2]).view(-1, self.n_channels//2, n_ftrs//4)
        beta = F.softmax(torch.bmm(f.transpose(1, 2), g), -1)
        o = self.out(torch.bmm(h, beta.transpose(1,2)).view(-1, self.n_channels//2, x.shape[2], x.shape[3]))
        return self.gamma * o + x
# %% ../nbs/01_layers.ipynb 97
def _conv1d_spect(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
"Create and initialize a `nn.Conv1d` layer with spectral normalization."
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias: conv.bias.data.zero_()
return spectral_norm(conv)
# %% ../nbs/01_layers.ipynb 98
class SimpleSelfAttention(Module):
    "Simplified self-attention (xresnet-style): mixes channels via x @ x^T applied to a 1d conv of x."
    def __init__(self, n_in:int, ks=1, sym=False):
        self.sym,self.n_in = sym,n_in
        self.conv = _conv1d_spect(n_in, n_in, ks, padding=ks//2, bias=False)
        # Learned residual weight, starts at 0 so the layer is initially the identity
        self.gamma = nn.Parameter(tensor([0.]))
    def forward(self,x):
        if self.sym:
            # Symmetrize the conv weight; NOTE(review): plain assignment to `.weight` works here
            # only because spectral_norm makes `weight` a computed attribute — confirm on upgrade
            c = self.conv.weight.view(self.n_in,self.n_in)
            c = (c + c.t())/2
            self.conv.weight = c.view(self.n_in,self.n_in,1)
        size = x.size()
        # Flatten spatial dims: (bs, c, h, w) -> (bs, c, h*w)
        x = x.view(*size[:2],-1)
        convx = self.conv(x)
        xxT = torch.bmm(x,x.permute(0,2,1).contiguous())
        o = torch.bmm(xxT, convx)
        o = self.gamma * o + x
        return o.view(*size).contiguous()
# %% ../nbs/01_layers.ipynb 101
def icnr_init(x, scale=2, init=nn.init.kaiming_normal_):
    "ICNR init of `x`, with `scale` and `init` function"
    ni,nf,h,w = x.shape
    ni2 = int(ni/(scale**2))
    # Initialize a kernel for ni/scale^2 channels, then replicate it scale^2 times so that
    # pixel-shuffle after this conv starts out as nearest-neighbor upsampling
    kernel = init(x.new_zeros([ni2,nf,h,w])).transpose(0, 1)
    kernel = kernel.contiguous().view(ni2, nf, -1).repeat(1, 1, scale**2)
    return kernel.contiguous().view([nf,ni,h,w]).transpose(0, 1)
# %% ../nbs/01_layers.ipynb 104
class PixelShuffle_ICNR(nn.Sequential):
    "Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`."
    def __init__(self, ni, nf=None, scale=2, blur=False, norm_type=NormType.Weight, act_cls=defaults.activation):
        super().__init__()
        nf = ifnone(nf, ni)
        layers = [ConvLayer(ni, nf*(scale**2), ks=1, norm_type=norm_type, act_cls=act_cls, bias_std=0),
                  nn.PixelShuffle(scale)]
        # Copy the ICNR init into the conv weight; with weight_norm the weight is split into
        # direction (weight_v) and magnitude (weight_g), so both must be set consistently
        if norm_type == NormType.Weight:
            layers[0][0].weight_v.data.copy_(icnr_init(layers[0][0].weight_v.data))
            layers[0][0].weight_g.data.copy_(((layers[0][0].weight_v.data**2).sum(dim=[1,2,3])**0.5)[:,None,None,None])
        else:
            layers[0][0].weight.data.copy_(icnr_init(layers[0][0].weight.data))
        # Optional "blur" trick (pad + avg-pool) to reduce checkerboard artifacts
        if blur: layers += [nn.ReplicationPad2d((1,0,1,0)), nn.AvgPool2d(2, stride=1)]
        super().__init__(*layers)
# %% ../nbs/01_layers.ipynb 110
def sequential(*args):
    "Create an `nn.Sequential`, wrapping items with `Lambda` if needed"
    is_single_odict = len(args) == 1 and isinstance(args[0], OrderedDict)
    if not is_single_odict:
        args = [a if isinstance(a, nn.Module) else Lambda(a) for a in args]
    return nn.Sequential(*args)
# %% ../nbs/01_layers.ipynb 111
class SequentialEx(Module):
    "Like `nn.Sequential`, but with ModuleList semantics, and can access module input"
    def __init__(self, *layers): self.layers = nn.ModuleList(layers)
    def forward(self, x):
        res = x
        for l in self.layers:
            # Each layer can read the original input through `.orig` (used by Merge/ProdLayer)
            res.orig = x
            nres = l(res)
            # We have to remove res.orig to avoid hanging refs and therefore memory leaks
            res.orig, nres.orig = None, None
            res = nres
        return res
    def __getitem__(self,i): return self.layers[i]
    def append(self,l): return self.layers.append(l)
    def extend(self,l): return self.layers.extend(l)
    def insert(self,i,l): return self.layers.insert(i,l)
# %% ../nbs/01_layers.ipynb 113
class MergeLayer(Module):
    "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
    def __init__(self, dense:bool=False):
        self.dense = dense
    def forward(self, x):
        # `x.orig` is the shortcut input attached by `SequentialEx`
        if self.dense: return torch.cat([x, x.orig], dim=1)
        return x + x.orig
# %% ../nbs/01_layers.ipynb 118
class Cat(nn.ModuleList):
    "Concatenate layers outputs over a given dim"
    def __init__(self, layers, dim=1):
        super().__init__(layers)
        self.dim = dim
    def forward(self, x):
        outputs = [layer(x) for layer in self]
        return torch.cat(outputs, dim=self.dim)
# %% ../nbs/01_layers.ipynb 121
class SimpleCNN(nn.Sequential):
    "Create a simple CNN with `filters`."
    def __init__(self, filters, kernel_szs=None, strides=None, bn=True):
        nl = len(filters)-1
        # Defaults: 3x3 kernels, stride 2 for every layer
        kernel_szs = ifnone(kernel_szs, [3]*nl)
        strides    = ifnone(strides   , [2]*nl)
        # No batchnorm on the last conv (it feeds straight into the pooling head)
        layers = [ConvLayer(filters[i], filters[i+1], kernel_szs[i], stride=strides[i],
                  norm_type=(NormType.Batch if bn and i<nl-1 else None)) for i in range(nl)]
        layers.append(PoolFlatten())
        super().__init__(*layers)
# %% ../nbs/01_layers.ipynb 128
class ProdLayer(Module):
    "Merge a shortcut with the result of the module by multiplying them."
    def forward(self, x):
        # `x.orig` is the shortcut input attached by `SequentialEx`
        return x.mul(x.orig)
# %% ../nbs/01_layers.ipynb 129
inplace_relu = partial(nn.ReLU, inplace=True)
# %% ../nbs/01_layers.ipynb 130
def SEModule(ch, reduction, act_cls=defaults.activation):
    "Squeeze-and-Excitation module for `ch` channels: global pool, bottleneck (`ch/reduction`, rounded to a multiple of 8), sigmoid gate multiplied onto the input."
    nf = math.ceil(ch//reduction/8)*8
    return SequentialEx(nn.AdaptiveAvgPool2d(1),
                        ConvLayer(ch, nf, ks=1, norm_type=None, act_cls=act_cls),
                        ConvLayer(nf, ch, ks=1, norm_type=None, act_cls=nn.Sigmoid),
                        ProdLayer())
# %% ../nbs/01_layers.ipynb 131
class ResBlock(Module):
    "Resnet block from `ni` to `nh` with `stride`"
    @delegates(ConvLayer.__init__)
    def __init__(self, expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,
                 sa=False, sym=False, norm_type=NormType.Batch, act_cls=defaults.activation, ndim=2, ks=3,
                 pool=AvgPool, pool_first=True, **kwargs):
        # Last conv gets a zero-init norm so the block starts out close to the identity
        norm2 = (NormType.BatchZero if norm_type==NormType.Batch else
                 NormType.InstanceZero if norm_type==NormType.Instance else norm_type)
        if nh2 is None: nh2 = nf
        if nh1 is None: nh1 = nh2
        nf,ni = nf*expansion,ni*expansion
        k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)
        k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)
        # expansion==1: two 3x3 convs (basic block); otherwise 1x1 -> ks x ks -> 1x1 (bottleneck)
        convpath  = [ConvLayer(ni,  nh2, ks, stride=stride, groups=ni if dw else groups, **k0),
                     ConvLayer(nh2,  nf, ks, groups=g2, **k1)
        ] if expansion == 1 else [
                     ConvLayer(ni,  nh1, 1, **k0),
                     ConvLayer(nh1, nh2, ks, stride=stride, groups=nh1 if dw else groups, **k0),
                     ConvLayer(nh2,  nf, 1, groups=g2, **k1)]
        if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
        if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))
        self.convpath = nn.Sequential(*convpath)
        # Identity path: 1x1 conv when channels change, pool when spatial size changes
        idpath = []
        if ni!=nf: idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))
        if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=ndim, ceil_mode=True))
        self.idpath = nn.Sequential(*idpath)
        self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()
    def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))
# %% ../nbs/01_layers.ipynb 133
def SEBlock(expansion, ni, nf, groups=1, reduction=16, stride=1, **kwargs):
    "`ResBlock` with a Squeeze-and-Excitation module (`reduction`) and widened hidden layers."
    return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh1=nf*2, nh2=nf*expansion, **kwargs)
# %% ../nbs/01_layers.ipynb 134
def SEResNeXtBlock(expansion, ni, nf, groups=32, reduction=16, stride=1, base_width=4, **kwargs):
    "SE-ResNeXt block: grouped convolutions with width `base_width * groups * nf/64` plus an SE module."
    w = math.floor(nf * (base_width / 64)) * groups
    return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh2=w, **kwargs)
# %% ../nbs/01_layers.ipynb 135
def SeparableBlock(expansion, ni, nf, reduction=16, stride=1, base_width=4, **kwargs):
    "`ResBlock` using depthwise (`dw=True`) separable convolutions and an SE module."
    return ResBlock(expansion, ni, nf, stride=stride, reduction=reduction, nh2=nf*2, dw=True, **kwargs)
# %% ../nbs/01_layers.ipynb 138
def _stack_tups(tuples, stack_dim=1):
"Stack tuple of tensors along `stack_dim`"
return tuple(torch.stack([t[i] for t in tuples], dim=stack_dim) for i in range_of(tuples[0]))
# %% ../nbs/01_layers.ipynb 139
class TimeDistributed(Module):
    "Applies `module` over `tdim` identically for each step, use `low_mem` to compute one at a time."
    def __init__(self, module, low_mem=False, tdim=1):
        store_attr()
    def forward(self, *tensors, **kwargs):
        "input x with shape:(bs,seq_len,channels,width,height)"
        if self.low_mem or self.tdim!=1:
            return self.low_mem_forward(*tensors, **kwargs)
        else:
            # Fast path (tdim==1 only): fold (bs, seq) into a single batch dimension
            inp_shape = tensors[0].shape
            bs, seq_len = inp_shape[0], inp_shape[1]
            out = self.module(*[x.view(bs*seq_len, *x.shape[2:]) for x in tensors], **kwargs)
            return self.format_output(out, bs, seq_len)
    def low_mem_forward(self, *tensors, **kwargs):
        "input x with shape:(bs,seq_len,channels,width,height)"
        seq_len = tensors[0].shape[self.tdim]
        args_split = [torch.unbind(x, dim=self.tdim) for x in tensors]
        out = []
        for i in range(seq_len):
            # Bug fix: `**kwargs` must be forwarded to the wrapped module, not passed to `list.append`
            out.append(self.module(*[args[i] for args in args_split], **kwargs))
        if isinstance(out[0], tuple):
            return _stack_tups(out, stack_dim=self.tdim)
        return torch.stack(out, dim=self.tdim)
    def format_output(self, out, bs, seq_len):
        "unstack from batchsize outputs"
        if isinstance(out, tuple):
            return tuple(out_i.view(bs, seq_len, *out_i.shape[1:]) for out_i in out)
        return out.view(bs, seq_len,*out.shape[1:])
    def __repr__(self):
        return f'TimeDistributed({self.module})'
# %% ../nbs/01_layers.ipynb 158
from torch.jit import script
# %% ../nbs/01_layers.ipynb 159
@script
def _swish_jit_fwd(x): return x.mul(torch.sigmoid(x))
@script
def _swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class _SwishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _swish_jit_bwd(x, grad_output)
# %% ../nbs/01_layers.ipynb 160
def swish(x, inplace=False): return _SwishJitAutoFn.apply(x)
# %% ../nbs/01_layers.ipynb 161
class Swish(Module):
    "Module wrapper for the swish activation `x * sigmoid(x)` with a memory-efficient backward"
    def forward(self, x): return _SwishJitAutoFn.apply(x)
# %% ../nbs/01_layers.ipynb 162
@script
def _mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x)))
@script
def _mish_jit_bwd(x, grad_output):
    # d/dx [x*tanh(softplus(x))] = tanh(sp) + x*sigmoid(x)*(1 - tanh(sp)^2)
    x_sigmoid = torch.sigmoid(x)
    x_tanh_sp = F.softplus(x).tanh()
    return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
    "Memory-efficient mish: only `x` is saved for backward; the activation is recomputed."
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return _mish_jit_fwd(x)
    @staticmethod
    def backward(ctx, grad_output):
        # Fix: `ctx.saved_variables` is long-deprecated — use the documented `saved_tensors`
        x, = ctx.saved_tensors
        return _mish_jit_bwd(x, grad_output)
# %% ../nbs/01_layers.ipynb 163
def mish(x): return F.mish(x) if torch.__version__ >= '1.9' else MishJitAutoFn.apply(x)
# %% ../nbs/01_layers.ipynb 164
class Mish(Module):
    "Module wrapper for the mish activation `x * tanh(softplus(x))` with a memory-efficient backward"
    def forward(self, x): return MishJitAutoFn.apply(x)
# %% ../nbs/01_layers.ipynb 165
if ismin_torch('1.9'): Mish = nn.Mish
# %% ../nbs/01_layers.ipynb 166
for o in swish,Swish,mish,Mish: o.__default_init__ = kaiming_uniform_
# %% ../nbs/01_layers.ipynb 169
class ParameterModule(Module):
    "Register a lone parameter `p` in a module."
    def __init__(self, p): self.val = p
    # Forward is the identity: this module exists only so `p` shows up in module traversal
    def forward(self, x): return x
# %% ../nbs/01_layers.ipynb 170
def children_and_parameters(m):
    "Return the children of `m` and its direct parameters not registered in modules."
    result = list(m.children())
    # ids of every parameter already owned by a child module
    owned = {id(p) for c in m.children() for p in c.parameters()}
    result += [ParameterModule(p) for p in m.parameters() if id(p) not in owned]
    return result
# %% ../nbs/01_layers.ipynb 172
def has_children(m):
    "Whether module `m` has any child modules."
    for _ in m.children():
        return True
    return False
# %% ../nbs/01_layers.ipynb 174
def flatten_model(m):
    "Return the list of all submodules and parameters of `m`"
    # Recursive depth-first flatten; lone parameters appear wrapped in `ParameterModule`
    return sum(map(flatten_model,children_and_parameters(m)),[]) if has_children(m) else [m]
# %% ../nbs/01_layers.ipynb 176
class NoneReduce():
    "A context manager to evaluate `loss_func` with none reduce."
    def __init__(self, loss_func):
        self.loss_func = loss_func
        self.old_red = None
    def __enter__(self):
        # Plain functions (no `reduction` attribute) get it passed as a keyword instead
        if not hasattr(self.loss_func, 'reduction'):
            return partial(self.loss_func, reduction='none')
        self.old_red = self.loss_func.reduction
        self.loss_func.reduction = 'none'
        return self.loss_func
    def __exit__(self, type, value, traceback):
        # Restore the original reduction on exit
        if self.old_red is not None: self.loss_func.reduction = self.old_red
# %% ../nbs/01_layers.ipynb 178
def in_channels(m):
    "Return the shape of the first weight layer in `m`."
    # Looks for the first 4-dim weight (i.e. a 2d conv) and returns its input-channel count
    try: return next(l.weight.shape[1] for l in flatten_model(m) if nested_attr(l,'weight.ndim',-1)==4)
    except StopIteration as e: e.args = ["No weight layer"]; raise
| 27,282 | 40.526636 | 127 | py |
fastai | fastai-master/fastai/_pytorch_doc.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/99_pytorch_doc.ipynb.
# %% ../nbs/99_pytorch_doc.ipynb 5
from __future__ import annotations
from types import ModuleType
# %% auto 0
__all__ = ['PYTORCH_URL', 'pytorch_doc_link']
# %% ../nbs/99_pytorch_doc.ipynb 6
PYTORCH_URL = 'https://pytorch.org/docs/stable/'
# %% ../nbs/99_pytorch_doc.ipynb 7
def _mod2page(
mod:ModuleType, # A PyTorch module
) -> str:
"Get the webpage name for a PyTorch module"
if mod == Tensor: return 'tensors.html'
name = mod.__name__
name = name.replace('torch.', '').replace('utils.', '')
if name.startswith('nn.modules'): return 'nn.html'
return f'{name}.html'
# %% ../nbs/99_pytorch_doc.ipynb 9
import importlib
# %% ../nbs/99_pytorch_doc.ipynb 10
def pytorch_doc_link(
    name:str # Name of a PyTorch module, class or function
) -> (str, None):
    "Get the URL to the documentation of a PyTorch module, class or function"
    # Shorthands: leading 'F' means torch.nn.functional; bare names are under torch.
    if name.startswith('F'): name = 'torch.nn.functional' + name[1:]
    if not name.startswith('torch.'): name = 'torch.' + name
    if name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html'
    # First see if the full name is itself an importable module
    try:
        return f'{PYTORCH_URL}{_mod2page(importlib.import_module(name))}'
    except: pass
    # Otherwise treat the last component as an attribute of its parent module
    mod_name, _, fname = name.rpartition('.')
    if mod_name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html#{name}'
    try:
        page = _mod2page(importlib.import_module(mod_name))
        return f'{PYTORCH_URL}{page}#{name}'
    except: return None
| 1,573 | 32.489362 | 77 | py |
fastai | fastai-master/fastai/distributed.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/20a_distributed.ipynb.
# %% ../nbs/20a_distributed.ipynb 2
from __future__ import annotations
from .basics import *
from .callback.progress import ProgressCallback
from torch.nn.parallel import DistributedDataParallel, DataParallel
from .data.load import _FakeLoader,_loaders
from .optimizer import OptimWrapper
try: from accelerate import Accelerator
except ModuleNotFoundError: pass
# %% auto 0
__all__ = ['ParallelTrainer', 'setup_distrib', 'teardown_distrib', 'DistributedDL', 'DistributedTrainer', 'rank0_first']
# %% ../nbs/20a_distributed.ipynb 6
@patch
def reset(self: DataParallel):
    "Patch required `reset` call into `DataParallel`"
    # Forward `reset` to the wrapped module (fastai RNNs define it) when present
    if hasattr(self.module, 'reset'): self.module.reset()
# %% ../nbs/20a_distributed.ipynb 7
class ParallelTrainer(Callback):
    "Wrap a model `DataParallel` automatically"
    run_after,run_before = TrainEvalCallback,Recorder
    def __init__(self, device_ids): self.device_ids = device_ids
    # Wrap on fit start, unwrap (back to `.module`) when training ends
    def before_fit(self): self.learn.model = DataParallel(self.learn.model, device_ids=self.device_ids)
    def after_fit(self): self.learn.model = self.learn.model.module
# %% ../nbs/20a_distributed.ipynb 8
@patch
def to_parallel(self: Learner, device_ids=None):
    "Add `ParallelTrainer` callback to a `Learner`"
    self.add_cb(ParallelTrainer(device_ids))
    return self
# %% ../nbs/20a_distributed.ipynb 9
@patch
def detach_parallel(self: Learner):
    "Remove `ParallelTrainer` callback from a Learner"
    self.remove_cb(ParallelTrainer)
    return self
# %% ../nbs/20a_distributed.ipynb 10
@patch
@contextmanager
def parallel_ctx(self: Learner, device_ids=None):
    "A context manager to adapt a learner to train in data parallel mode."
    # try/finally guarantees the callback is removed even if training raises
    try:
        self.to_parallel(device_ids)
        yield self
    finally: self.detach_parallel()
# %% ../nbs/20a_distributed.ipynb 13
@patch
def reset(self: DistributedDataParallel):
    "Patch required `reset` call into `DistributedDataParallel`"
    # Forward `reset` to the wrapped module (fastai RNNs define it) when present
    if hasattr(self.module, 'reset'): self.module.reset()
# %% ../nbs/20a_distributed.ipynb 14
def setup_distrib(gpu=None):
    "Setup this process to participate in distributed training"
    if gpu is None: return gpu
    gpu = int(gpu)
    torch.cuda.set_device(int(gpu))
    # Only initialize the NCCL process group when env vars indicate >1 process
    if num_distrib() > 0: torch.distributed.init_process_group(backend='nccl', init_method='env://')
    return gpu
# %% ../nbs/20a_distributed.ipynb 15
def teardown_distrib():
    "Free distributed training resources"
    # No-op when no process group was ever initialized
    if not torch.distributed.is_initialized(): return
    torch.distributed.destroy_process_group()
# %% ../nbs/20a_distributed.ipynb 17
def _round_to_multiple(number,multiple): return int(math.ceil(number/multiple)*multiple)
# %% ../nbs/20a_distributed.ipynb 18
class DistributedDL(TfmdDL):
    "A `TfmdDL` which splits a batch into equal size pieces for each worker"
    def __init__(self,dl,rank=None,world_size=None):
        if rank is None: rank=rank_distrib()
        if world_size is None: world_size=num_distrib()
        store_attr()
        # Plain torch DataLoaders are re-wrapped as fastai DataLoaders first
        if type(dl) == torch.utils.data.DataLoader:
            shuffle = True if eq(type(dl.sampler), torch.utils.data.RandomSampler) else False
            self.dl = DataLoader(dataset=dl.dataset, bs=dl.batch_size, num_workers=dl.num_workers, \
                pin_memory=dl.pin_memory, timeout=dl.timeout, shuffle=shuffle, drop_last=dl.drop_last, persistent_workers=dl.persistent_workers)
        self.bs,self.device,self.drop_last,self.dataset,fake,self.num_workers,self.offs,self.pin_memory = \
            attrgetter('bs','device','drop_last','dataset','fake_l','num_workers','offs','pin_memory')(self.dl)
        self.fake_l = _FakeLoader(self, fake.pin_memory, fake.num_workers, fake.timeout,
                                  persistent_workers=fake.persistent_workers,
                                  pin_memory_device=fake.pin_memory_device)
    def _broadcast(self,t,rank):
        "Broadcasts t from rank `rank` to all other ranks. Returns t so t is same for all ranks after call."
        t = LongTensor(t).cuda() # nccl only works with cuda tensors
        torch.distributed.broadcast(t,rank)
        return t.cpu().tolist()
    def _to_detach(self,b,cpu=True,gather=True): return to_detach(b,cpu,gather) # member func so we can override for test
    # Each rank sees an equal share of the (padded) dataset length
    def __len__(self): return _round_to_multiple(len(self.dl),self.world_size)//self.world_size
    def get_idxs(self):
        idxs = list(self.dl.get_idxs()) # compute get_idxs in all ranks (we'll only use rank 0 but size must be consistent)
        idxs = self._broadcast(idxs,0)  # broadcast and receive it from rank 0 to all
        self.n = len(idxs)              # we assumed n was dl.n but we really care about number of idxs
        # add extra samples to make it evenly divisible
        self.n_padded = _round_to_multiple(self.n,self.world_size)
        idxs += (idxs * (self.n_padded//self.n))[:self.n_padded-self.n] # idx needs to be repeated when n_padded>>n
        # slice padded idxs so that each rank gets self.n_padded//self.world_size tensors
        return idxs[self.rank*self.n_padded//self.world_size:(self.rank+1)*self.n_padded//self.world_size]
    def before_iter(self):
        self.i = 0
        self.dl.before_iter()
    def randomize(self): self.dl.randomize()
    def after_batch(self,b):
        # Track how many samples this rank has consumed, for unpadding in `to_detach`
        self.i += find_bs(b)
        return self.dl.after_batch(b)
    def after_iter(self): self.dl.after_iter()
    def create_batches(self,samps): return self.dl.create_batches(samps)
    def to_detach(self,b, cpu=True, gather=True):
        b = self._to_detach(b, cpu, gather)
        def _inner(b):
            if b.ndim>0:
                # for each rank, compute overflow of read idxs vs self.n and accumulate them to unpad totals after gathering
                n = sum([min(0,max(-len(b)//self.world_size,
                                   self.n-(self.i+r*self.n_padded//self.world_size))) for r in range(self.world_size)])
                b = b[:n or None]
            return b
        return apply(_inner,b) if gather and all(hasattr(self,o) for o in ('i','n','n_padded')) else b
# %% ../nbs/20a_distributed.ipynb 29
_hidden_params = ["mixed_precision", "fp16", "log_with", "logging_dir", "step_scheduler_with_optimizer"]
# %% ../nbs/20a_distributed.ipynb 30
class DistributedTrainer(Callback):
    "Wrap `model` in `DistributedDataParallel` and `dls` in `DistributedDL`"
    order = 11
    @delegates(Accelerator, but=_hidden_params)
    def __init__(self,
        sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm`
        **kwargs
    ):
        store_attr()
        self.accelerator = Accelerator(**kwargs)
    def before_fit(self):
        self.learn.model = self.accelerator.prepare(
            nn.SyncBatchNorm.convert_sync_batchnorm(self.model) if self.sync_bn else self.model
        )
        # Keep the original loaders so they can be restored in `after_fit`
        self.old_dls = list(self.dls)
        self.learn.dls.loaders = [self._wrap_dl(dl) for dl in self.dls]
        # Silence logging on every rank except 0
        if rank_distrib(): self.learn.logger=noop
    def _wrap_dl(self, dl): return dl if isinstance(dl,DistributedDL) else DistributedDL(dl)
    def _backward(self): self.accelerator.backward(self.learn.loss_grad)
    def before_train(self):    self.learn.dl = self._wrap_dl(self.learn.dl)
    def before_validate(self): self.learn.dl = self._wrap_dl(self.learn.dl)
    def after_fit(self): self.learn.model,self.learn.dls.loaders = self.learn.model.module,self.old_dls
# %% ../nbs/20a_distributed.ipynb 31
@patch
@delegates(Accelerator, but=_hidden_params)
def to_distributed(self: Learner,
        sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm`
        **kwargs
    ):
    "Add `AcceleratedTrainer` to a learner, and configures an Accelerator"
    self.add_cb(DistributedTrainer(sync_bn, **kwargs))
    # Only rank 0 keeps the progress bar
    if rank_distrib(): self.remove_cb(ProgressCallback)
    return self
# %% ../nbs/20a_distributed.ipynb 32
@patch
def detach_distributed(self: Learner):
    "Remove `DistributedTrainer` from a learner"
    if num_distrib() <=1: return self
    self.remove_cb(DistributedTrainer)
    # Restore the progress bar removed by `to_distributed` on non-zero ranks
    if rank_distrib() and not hasattr(self, 'progress'): self.add_cb(ProgressCallback())
    return self
# %% ../nbs/20a_distributed.ipynb 34
@patch
@contextmanager
@delegates(Accelerator, but=_hidden_params)
def distrib_ctx(self: Learner,
        sync_bn=True, # Whether to replace all batch norm with `nn.SyncBatchNorm`
        in_notebook=False, # Whether we are launching from a notebook or not
        **kwargs
    ):
    "A context manager to adapt a learner to train in distributed data parallel mode."
    try: import accelerate
    except ImportError as e:
        e.args = ["Accelerate is required. Install with `pip install accelerate`"]
        raise
    # Adapt self to DistributedDataParallel, yield, and cleanup afterwards.
    cleanup_dpg = False
    try:
        # In a notebook, the process group must be initialized here (no external launcher)
        if in_notebook:
            cuda_id = rank_distrib()
            if not torch.distributed.is_initialized():
                setup_distrib(cuda_id)
                # Only tear down a group this context actually created
                cleanup_dpg = torch.distributed.is_initialized()
            if not rank_distrib(): print("Training Learner...")
        if num_distrib(): self.to_distributed(sync_bn, **kwargs)
        yield self
    finally:
        self.detach_distributed()
        if cleanup_dpg: teardown_distrib()
# %% ../nbs/20a_distributed.ipynb 36
def rank0_first(func, *args, **kwargs):
    "Execute `func` in the Rank-0 process first, then in other ranks in parallel."
    if args or kwargs: func = partial(func, *args, **kwargs)
    # A throwaway learner only exists to reuse `distrib_ctx`'s setup/teardown machinery
    dummy_l = Learner(DataLoaders(device='cpu'), nn.Linear(1,1), loss_func=lambda: 0)
    with dummy_l.distrib_ctx():
        if not rank_distrib(): res = func()
        # Barrier ensures rank 0 finishes (e.g. downloads/caches data) before others start
        distrib_barrier()
        if rank_distrib(): res = func()
    return res
| 9,784 | 42.878924 | 144 | py |
fastai | fastai-master/fastai/metrics.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/13b_metrics.ipynb.
# %% ../nbs/13b_metrics.ipynb 1
from __future__ import annotations
from .data.all import *
from .optimizer import *
from .learner import *
# %% auto 0
__all__ = ['rmse', 'exp_rmspe', 'perplexity', 'AccumMetric', 'skm_to_fastai', 'optim_metric', 'accuracy', 'error_rate',
'top_k_accuracy', 'APScoreBinary', 'BalancedAccuracy', 'BrierScore', 'CohenKappa', 'F1Score', 'FBeta',
'HammingLoss', 'Jaccard', 'Precision', 'Recall', 'RocAuc', 'RocAucBinary', 'MatthewsCorrCoef',
'accuracy_multi', 'APScoreMulti', 'BrierScoreMulti', 'F1ScoreMulti', 'FBetaMulti', 'HammingLossMulti',
'JaccardMulti', 'MatthewsCorrCoefMulti', 'PrecisionMulti', 'RecallMulti', 'RocAucMulti', 'mse', 'mae',
'msle', 'ExplainedVariance', 'R2Score', 'PearsonCorrCoef', 'SpearmanCorrCoef', 'foreground_acc', 'Dice',
'DiceMulti', 'JaccardCoeff', 'CorpusBLEUMetric', 'Perplexity', 'LossMetric', 'LossMetrics']
# %% ../nbs/13b_metrics.ipynb 7
import sklearn.metrics as skm
import scipy.stats as scs
# %% ../nbs/13b_metrics.ipynb 8
# Dynamically builds an `ActivationType` class with members No/Sigmoid/Softmax/BinarySoftmax
mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']},
         doc="All possible activation classes for `AccumMetric")
# %% ../nbs/13b_metrics.ipynb 9
class AccumMetric(Metric):
    "Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
    def __init__(self, func, dim_argmax=None, activation=ActivationType.No, thresh=None, to_np=False,
                 invert_arg=False, flatten=True, name=None, **kwargs):
        store_attr('func,dim_argmax,activation,thresh,flatten')
        self._name = ifnone(name, self.func.func.__name__ if hasattr(self.func, 'func') else  self.func.__name__)
        self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
    def reset(self):
        "Clear all targs and preds"
        self.targs,self.preds = [],[]
    def accumulate(self, learn):
        "Store targs and preds from `learn`, using activation function and argmax as appropriate"
        pred = learn.pred
        if self.activation in [ActivationType.Softmax, ActivationType.BinarySoftmax]:
            pred = F.softmax(pred, dim=self.dim_argmax)
            # BinarySoftmax keeps only the positive-class probability
            if self.activation == ActivationType.BinarySoftmax: pred = pred[:, -1]
        elif self.activation == ActivationType.Sigmoid: pred = torch.sigmoid(pred)
        elif self.dim_argmax: pred = pred.argmax(dim=self.dim_argmax)
        if self.thresh:  pred = (pred >= self.thresh)
        self.accum_values(pred,learn.y,learn)
    def accum_values(self, preds, targs,learn=None):
        "Store targs and preds"
        # Use the learner's distributed-aware detach when available (gathers across ranks)
        to_d = learn.to_detach if learn is not None else to_detach
        preds,targs = to_d(preds),to_d(targs)
        if self.flatten: preds,targs = flatten_check(preds,targs)
        self.preds.append(preds)
        self.targs.append(targs)
    def __call__(self, preds, targs):
        "Calculate metric on one batch of data"
        self.reset()
        self.accum_values(preds,targs)
        return self.value
    @property
    def value(self):
        "Value of the metric using accumulated preds and targs"
        if len(self.preds) == 0: return
        preds,targs = torch.cat(self.preds),torch.cat(self.targs)
        if self.to_np: preds,targs = preds.numpy(),targs.numpy()
        # sklearn-style funcs take (targs, preds); invert_args handles the swap
        return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
    @property
    def name(self):  return self._name
    @name.setter
    def name(self, value): self._name = value
# %% ../nbs/13b_metrics.ipynb 15
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, activation=None, **kwargs):
    "Convert `func` from sklearn.metrics to a fastai metric"
    # Classification without a threshold uses argmax; with a threshold, sigmoid + binarize
    dim_argmax = axis if is_class and thresh is None else None
    if activation is None:
        activation = ActivationType.Sigmoid if (is_class and thresh is not None) else ActivationType.No
    # sklearn takes (targs, preds) and numpy arrays, hence to_np/invert_arg
    return AccumMetric(func, dim_argmax=dim_argmax, activation=activation, thresh=thresh,
                       to_np=True, invert_arg=True, **kwargs)
# %% ../nbs/13b_metrics.ipynb 21
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
    "Replace metric `f` with a version that optimizes argument `argname` over `bounds` (tolerance `tol`)."
    def _f(preds, targs):
        def minfunc(x):
            kwargs = {argname:x}
            res = f(preds, targs, **kwargs)
            # minimize_scalar minimizes, so negate when we want to maximize the metric
            return -res if do_neg else res
        # Bug fix: honor `tol` (it was silently ignored; 'xatol' was hard-coded to 0.01)
        optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
                                                options={'xatol':tol})
        fun = -optres.fun if do_neg else optres.fun
        return (fun,optres.x) if get_x else fun
    _f.__name__ = f'opt_{f.__name__}'
    return _f
# %% ../nbs/13b_metrics.ipynb 25
def accuracy(inp, targ, axis=-1):
    "Compute accuracy with `targ` when `pred` is bs * n_classes"
    predicted = inp.argmax(dim=axis)
    predicted,targ = flatten_check(predicted, targ)
    return (predicted == targ).float().mean()
# %% ../nbs/13b_metrics.ipynb 28
def error_rate(inp, targ, axis=-1):
    "1 - `accuracy`"
    return 1 - accuracy(inp, targ, axis=axis)
# %% ../nbs/13b_metrics.ipynb 30
def top_k_accuracy(inp, targ, k=5, axis=-1):
    "Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
    top_idx = inp.topk(k=k, dim=axis)[1]
    expanded = targ.unsqueeze(dim=axis).expand_as(top_idx)
    # One hit per row at most, so the sum over the k candidates is 0 or 1
    hits = (top_idx == expanded).sum(dim=-1)
    return hits.float().mean()
# %% ../nbs/13b_metrics.ipynb 32
def APScoreBinary(axis=-1, average='macro', pos_label=1, sample_weight=None):
    "Average Precision for single-label binary classification problems"
    # BinarySoftmax keeps the positive-class probability as required by `average_precision_score`
    return skm_to_fastai(skm.average_precision_score, axis=axis, activation=ActivationType.BinarySoftmax,
                         average=average, pos_label=pos_label, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 34
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
    "Balanced Accuracy for single-label binary classification problems"
    # Delegates to sklearn's `balanced_accuracy_score`
    return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
                         sample_weight=sample_weight, adjusted=adjusted)
# %% ../nbs/13b_metrics.ipynb 36
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
    "Brier score for single-label classification problems"
    # Delegates to sklearn's `brier_score_loss`
    return skm_to_fastai(skm.brier_score_loss, axis=axis,
                         sample_weight=sample_weight, pos_label=pos_label)
# %% ../nbs/13b_metrics.ipynb 38
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
    "Cohen kappa for single-label classification problems"
    # Delegates to sklearn's `cohen_kappa_score`
    return skm_to_fastai(skm.cohen_kappa_score, axis=axis, labels=labels, weights=weights,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 40
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "F1 score for single-label classification problems"
    # Forward the sklearn-specific options verbatim.
    sk_kwargs = dict(labels=labels, pos_label=pos_label, average=average,
                     sample_weight=sample_weight)
    return skm_to_fastai(skm.f1_score, axis=axis, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 42
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "FBeta score with `beta` for single-label classification problems"
    # Forward the sklearn-specific options (including `beta`) verbatim.
    sk_kwargs = dict(beta=beta, labels=labels, pos_label=pos_label,
                     average=average, sample_weight=sample_weight)
    return skm_to_fastai(skm.fbeta_score, axis=axis, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 44
def HammingLoss(axis=-1, sample_weight=None):
    "Hamming loss for single-label classification problems"
    # Forward the sklearn-specific option verbatim.
    return skm_to_fastai(skm.hamming_loss, axis=axis, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 46
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Jaccard score for single-label classification problems"
    # Forward the sklearn-specific options verbatim.
    sk_kwargs = dict(labels=labels, pos_label=pos_label, average=average,
                     sample_weight=sample_weight)
    return skm_to_fastai(skm.jaccard_score, axis=axis, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 48
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Precision for single-label classification problems"
    # Forward the sklearn-specific options verbatim.
    sk_kwargs = dict(labels=labels, pos_label=pos_label, average=average,
                     sample_weight=sample_weight)
    return skm_to_fastai(skm.precision_score, axis=axis, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 50
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Recall for single-label classification problems"
    # Forward the sklearn-specific options verbatim.
    sk_kwargs = dict(labels=labels, pos_label=pos_label, average=average,
                     sample_weight=sample_weight)
    return skm_to_fastai(skm.recall_score, axis=axis, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 52
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='ovr'):
    "Area Under the Receiver Operating Characteristic Curve for single-label multiclass classification problems"
    # sklearn only accepts these two multiclass strategies for probability inputs.
    assert multi_class in ('ovr', 'ovo')
    sk_kwargs = dict(average=average, sample_weight=sample_weight,
                     max_fpr=max_fpr, multi_class=multi_class)
    return skm_to_fastai(skm.roc_auc_score, axis=axis,
                         activation=ActivationType.Softmax, flatten=False, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 54
def RocAucBinary(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='raise'):
    "Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
    # Forward the sklearn-specific options verbatim.
    sk_kwargs = dict(average=average, sample_weight=sample_weight,
                     max_fpr=max_fpr, multi_class=multi_class)
    return skm_to_fastai(skm.roc_auc_score, axis=axis,
                         activation=ActivationType.BinarySoftmax, **sk_kwargs)
# %% ../nbs/13b_metrics.ipynb 56
def MatthewsCorrCoef(sample_weight=None, **kwargs):
    "Matthews correlation coefficient for single-label classification problems"
    # Extra `kwargs` go to the fastai wrapper, not to sklearn.
    return skm_to_fastai(skm.matthews_corrcoef, sample_weight=sample_weight, **kwargs)
# %% ../nbs/13b_metrics.ipynb 59
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
    "Compute accuracy when `inp` and `targ` are the same size."
    inp, targ = flatten_check(inp, targ)
    # Optionally map raw scores to probabilities, then threshold per label.
    probs = inp.sigmoid() if sigmoid else inp
    return ((probs > thresh) == targ.bool()).float().mean()
# %% ../nbs/13b_metrics.ipynb 62
def APScoreMulti(sigmoid=True, average='macro', pos_label=1, sample_weight=None):
    "Average Precision for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.average_precision_score, activation=act, flatten=False,
                         average=average, pos_label=pos_label,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 64
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
    "Brier score for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.brier_score_loss, thresh=thresh, activation=act,
                         flatten=False, sample_weight=sample_weight,
                         pos_label=pos_label)
# %% ../nbs/13b_metrics.ipynb 66
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "F1 score for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.f1_score, thresh=thresh, activation=act, flatten=False,
                         labels=labels, pos_label=pos_label, average=average,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 68
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "FBeta score with `beta` for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.fbeta_score, thresh=thresh, activation=act, flatten=False,
                         beta=beta, labels=labels, pos_label=pos_label,
                         average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 70
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
    "Hamming loss for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.hamming_loss, thresh=thresh, activation=act,
                         flatten=False, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 72
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Jaccard score for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.jaccard_score, thresh=thresh, activation=act, flatten=False,
                         labels=labels, pos_label=pos_label, average=average,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 74
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
    "Matthews correlation coefficient for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, activation=act,
                         flatten=False, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 76
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Precision for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.precision_score, thresh=thresh, activation=act, flatten=False,
                         labels=labels, pos_label=pos_label, average=average,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 78
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Recall for multi-label classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.recall_score, thresh=thresh, activation=act, flatten=False,
                         labels=labels, pos_label=pos_label, average=average,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 80
def RocAucMulti(sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
    "Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
    # Pick the activation the wrapper applies before the sklearn call.
    act = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.roc_auc_score, activation=act, flatten=False,
                         average=average, sample_weight=sample_weight,
                         max_fpr=max_fpr)
# %% ../nbs/13b_metrics.ipynb 84
def mse(inp, targ):
    "Mean squared error between `inp` and `targ`."
    # Flatten both tensors (with a shape check) before the element-wise loss.
    flat_inp, flat_targ = flatten_check(inp, targ)
    return F.mse_loss(flat_inp, flat_targ)
# %% ../nbs/13b_metrics.ipynb 86
def _rmse(inp, targ):
    # Root of the mean squared error; shapes must already match (no flattening).
    return F.mse_loss(inp, targ).sqrt()
# Accumulate all predictions/targets first, then apply `_rmse` once over the epoch.
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
# %% ../nbs/13b_metrics.ipynb 89
def mae(inp, targ):
    "Mean absolute error between `inp` and `targ`."
    flat_inp, flat_targ = flatten_check(inp, targ)
    # |inp - targ| averaged over every element.
    return (flat_inp - flat_targ).abs().mean()
# %% ../nbs/13b_metrics.ipynb 91
def msle(inp, targ):
    """Mean squared logarithmic error between `inp` and `targ`.

    Computes the MSE of `log(1 + x)` for both tensors. Uses `torch.log1p`
    rather than `torch.log(1 + x)` for better floating-point accuracy when
    values are close to zero; results are mathematically identical.
    Inputs are flattened (with a shape check) first.
    """
    inp, targ = flatten_check(inp, targ)
    return F.mse_loss(torch.log1p(inp), torch.log1p(targ))
# %% ../nbs/13b_metrics.ipynb 93
def _exp_rmspe(inp, targ):
    # Undo a log transform, then compute root mean squared *percentage* error.
    pred, actual = torch.exp(inp), torch.exp(targ)
    pct_err = (actual - pred) / actual
    return pct_err.pow(2).mean().sqrt()
# Accumulate all predictions/targets first, then apply `_exp_rmspe` once.
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
# %% ../nbs/13b_metrics.ipynb 96
def ExplainedVariance(sample_weight=None):
    "Explained variance between predictions and targets"
    # Regression metric: `is_class=False` disables classification handling.
    return skm_to_fastai(skm.explained_variance_score, is_class=False,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 98
def R2Score(sample_weight=None):
    "R2 score between predictions and targets"
    # Regression metric: `is_class=False` disables classification handling.
    return skm_to_fastai(skm.r2_score, is_class=False,
                         sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 100
@delegates(AccumMetric)
def PearsonCorrCoef(dim_argmax=None, **kwargs):
    "Pearson correlation coefficient for regression problem"
    def pearsonr(x, y):
        # scs.pearsonr returns (statistic, p-value); keep only the statistic.
        return scs.pearsonr(x, y)[0]
    return AccumMetric(pearsonr, invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# %% ../nbs/13b_metrics.ipynb 103
@delegates(AccumMetric)
def SpearmanCorrCoef(dim_argmax=None, axis=0, nan_policy='propagate', **kwargs):
    "Spearman correlation coefficient for regression problem"
    def spearmanr(a, b=None, **kw):
        # scs.spearmanr returns (statistic, p-value); keep only the statistic.
        return scs.spearmanr(a, b, **kw)[0]
    wrapped = partial(spearmanr, axis=axis, nan_policy=nan_policy)
    return AccumMetric(wrapped, invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# %% ../nbs/13b_metrics.ipynb 111
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
    "Computes non-background accuracy for multiclass segmentation"
    targ = cast(targ.squeeze(1), TensorBase)  # drop the mask's channel dimension
    keep = targ != bkg_idx                    # exclude background pixels entirely
    preds = inp.argmax(dim=axis)
    return (preds[keep] == targ[keep]).float().mean()
# %% ../nbs/13b_metrics.ipynb 113
class Dice(Metric):
    "Dice coefficient metric for binary target in segmentation"
    def __init__(self, axis=1): self.axis = axis
    def reset(self): self.inter,self.union = 0,0
    def accumulate(self, learn):
        # Running sums let the final score be computed over the whole dataset.
        preds, targs = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
        self.inter += (preds * targs).float().sum().item()
        self.union += (preds + targs).float().sum().item()
    @property
    def value(self):
        # Dice = 2*|A∩B| / (|A|+|B|); None when nothing was accumulated.
        if self.union > 0: return 2. * self.inter / self.union
        return None
# %% ../nbs/13b_metrics.ipynb 115
class DiceMulti(Metric):
    "Averaged Dice metric (Macro F1) for multiclass target in segmentation"
    def __init__(self, axis=1): self.axis = axis
    def reset(self): self.inter,self.union = {},{}
    def accumulate(self, learn):
        preds, targs = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
        n_classes = learn.pred.shape[self.axis]
        for c in range(n_classes):
            p = torch.where(preds == c, 1, 0)
            t = torch.where(targs == c, 1, 0)
            # Per-class running sums; classes not seen before start from 0.
            self.inter[c] = self.inter.get(c, 0) + (p * t).float().sum().item()
            self.union[c] = self.union.get(c, 0) + (p + t).float().sum().item()
    @property
    def value(self):
        # Macro-average the per-class binary Dice scores; a class whose union
        # is zero contributes NaN and is ignored by nanmean.
        scores = np.array([2. * self.inter[c] / self.union[c] if self.union[c] > 0 else np.nan
                           for c in self.inter])
        return np.nanmean(scores)
# %% ../nbs/13b_metrics.ipynb 118
class JaccardCoeff(Dice):
    "Implementation of the Jaccard coefficient that is lighter in RAM"
    @property
    def value(self):
        # Jaccard = inter / (|A|+|B| - inter), reusing Dice's running sums.
        if self.union > 0: return self.inter / (self.union - self.inter)
        return None
# %% ../nbs/13b_metrics.ipynb 121
class CorpusBLEUMetric(Metric):
    # Accumulates 1- to 4-gram match statistics over the validation set and
    # reports corpus-level BLEU at the end of the epoch.
    def __init__(self, vocab_sz=5000, axis=-1):
        "BLEU Metric calculated over the validation corpus"
        self.metric_name = 'CorpusBLEU'
        self.axis, self.vocab_sz = axis, vocab_sz
        # corrects[i]/counts[i] track clipped matches / totals for (i+1)-grams.
        self.pred_len,self.targ_len,self.samp_idx,self.corrects,self.counts, = 0,0,0,[0]*4,[0]*4
    def reset(self):
        # Note: `samp_idx` is set in __init__ but never reset here.
        self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4
    class NGram():
        # Hashable wrapper around a token window so n-grams can be counted
        # with `Counter`; hash encodes tokens in base `max_n`.
        def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
        def __eq__(self, other):
            if len(self.ngram) != len(other.ngram): return False
            return np.all(np.array(self.ngram) == np.array(other.ngram))
        def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))
    def get_grams(self, x, n, max_n=5000):
        # For n==1 the raw token sequence is countable directly; otherwise
        # build one NGram per sliding window of length n.
        return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]
    def get_correct_ngrams(self, pred, targ, n, max_n=5000):
        # Clipped n-gram matches: each predicted n-gram counts at most as many
        # times as it appears in the target. Returns (matches, total predicted).
        pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n)
        pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)
        return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)
    def accumulate(self, learn):
        # Only accumulate during validation; training batches are skipped.
        if learn.training: return None
        else:
            last_output = learn.pred.argmax(dim=self.axis)
            last_target = learn.y
            for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):
                self.pred_len += len(pred)
                self.targ_len += len(targ)
                smooth_mteval = 1
                for i in range(4):
                    c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
                    if c == 0:
                        smooth_mteval *= 2
                        c = 1 / smooth_mteval # exp smoothing, method 3 from http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
                    self.corrects[i] += c
                    self.counts[i] += t
    @property
    def value(self):
        # NOTE(review): `self.counts` is a list, so `self.counts == 0` is always
        # False — this guard never fires; the empty-accumulation case instead
        # falls through to `max(self.corrects) == 0` and returns 0.0. Confirm
        # the intended behavior before changing it.
        if self.counts == 0: return None
        elif max(self.corrects) == 0: return 0.0
        else:
            # BLEU: geometric mean of 1-4-gram precisions times brevity penalty.
            precs = [c/t for c,t in zip(self.corrects,self.counts)]
            len_penalty = math.exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1
            return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)
# %% ../nbs/13b_metrics.ipynb 124
class Perplexity(AvgLoss):
    "Perplexity (exponential of cross-entropy loss) for Language Models"
    @property
    def value(self):
        # exp of the running average loss; None before anything is accumulated.
        if self.count == 0: return None
        return torch.exp(self.total/self.count)
    @property
    def name(self): return "perplexity"
# Module-level singleton, ready to pass straight into a Learner's metrics.
perplexity = Perplexity()
# %% ../nbs/13b_metrics.ipynb 127
class LossMetric(AvgMetric):
    "Create a metric from `loss_func.attr` named `nm`"
    def __init__(self, attr, nm=None): store_attr('attr,nm')
    def accumulate(self, learn):
        bs = find_bs(learn.yb)
        # A missing attribute contributes 0, so absent sub-losses don't crash.
        val = getattr(learn.loss_func, self.attr, 0)
        self.total += learn.to_detach(val) * bs
        self.count += bs
    @property
    def name(self):
        # Fall back to the attribute name when no display name was given.
        return self.nm if self.nm is not None else self.attr
# %% ../nbs/13b_metrics.ipynb 128
def LossMetrics(attrs, nms=None):
    "List of `LossMetric` for each of `attrs` and `nms`"
    # Both arguments accept a comma-separated string or a sequence of names.
    if isinstance(attrs, str): attrs = attrs.split(',')
    if nms is None: nms = attrs
    elif isinstance(nms, str): nms = nms.split(',')
    return [LossMetric(a, n) for a, n in zip(attrs, nms)]
| 22,629 | 48.195652 | 130 | py |
fastai | fastai-master/fastai/_nbdev.py | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"defaults.benchmark": "00_torch_core.ipynb",
"setup_cuda": "00_torch_core.ipynb",
"subplots": "00_torch_core.ipynb",
"show_image": "00_torch_core.ipynb",
"show_titled_image": "00_torch_core.ipynb",
"show_images": "00_torch_core.ipynb",
"ArrayBase": "00_torch_core.ipynb",
"ArrayImageBase": "00_torch_core.ipynb",
"ArrayImage": "00_torch_core.ipynb",
"ArrayImageBW": "00_torch_core.ipynb",
"ArrayMask": "00_torch_core.ipynb",
"Tensor.__array_eq__": "00_torch_core.ipynb",
"tensor": "00_torch_core.ipynb",
"set_seed": "00_torch_core.ipynb",
"get_random_states": "00_torch_core.ipynb",
"set_random_states": "00_torch_core.ipynb",
"no_random": "00_torch_core.ipynb",
"unsqueeze": "00_torch_core.ipynb",
"unsqueeze_": "00_torch_core.ipynb",
"apply": "00_torch_core.ipynb",
"maybe_gather": "00_torch_core.ipynb",
"to_detach": "00_torch_core.ipynb",
"to_half": "00_torch_core.ipynb",
"to_float": "00_torch_core.ipynb",
"defaults.use_cuda": "00_torch_core.ipynb",
"default_device": "00_torch_core.ipynb",
"to_device": "00_torch_core.ipynb",
"to_cpu": "00_torch_core.ipynb",
"to_np": "00_torch_core.ipynb",
"to_concat": "00_torch_core.ipynb",
"Tensor.set_meta": "00_torch_core.ipynb",
"Tensor.as_subclass": "00_torch_core.ipynb",
"TensorBase": "00_torch_core.ipynb",
"TensorImageBase": "00_torch_core.ipynb",
"TensorImage": "00_torch_core.ipynb",
"TensorImageBW": "00_torch_core.ipynb",
"TensorMask": "00_torch_core.ipynb",
"TensorFlowField": "00_torch_core.ipynb",
"TensorCategory": "00_torch_core.ipynb",
"TensorMultiCategory": "00_torch_core.ipynb",
"TitledTensorScalar": "00_torch_core.ipynb",
"L.tensored": "00_torch_core.ipynb",
"L.stack": "00_torch_core.ipynb",
"L.cat": "00_torch_core.ipynb",
"concat": "00_torch_core.ipynb",
"Chunks": "00_torch_core.ipynb",
"show_title": "00_torch_core.ipynb",
"ShowTitle": "00_torch_core.ipynb",
"TitledInt": "00_torch_core.ipynb",
"TitledFloat": "00_torch_core.ipynb",
"TitledStr": "00_torch_core.ipynb",
"TitledTuple": "00_torch_core.ipynb",
"TitledStr.truncate": "00_torch_core.ipynb",
"pd.DataFrame.__init__": "00_torch_core.ipynb",
"get_empty_df": "00_torch_core.ipynb",
"display_df": "00_torch_core.ipynb",
"get_first": "00_torch_core.ipynb",
"one_param": "00_torch_core.ipynb",
"item_find": "00_torch_core.ipynb",
"find_device": "00_torch_core.ipynb",
"find_bs": "00_torch_core.ipynb",
"np_func": "00_torch_core.ipynb",
"Module": "00_torch_core.ipynb",
"get_model": "00_torch_core.ipynb",
"one_hot": "00_torch_core.ipynb",
"one_hot_decode": "00_torch_core.ipynb",
"params": "00_torch_core.ipynb",
"trainable_params": "00_torch_core.ipynb",
"norm_types": "00_torch_core.ipynb",
"norm_bias_params": "00_torch_core.ipynb",
"batch_to_samples": "00_torch_core.ipynb",
"Tensor.interp_1d": "00_torch_core.ipynb",
"Tensor.pca": "00_torch_core.ipynb",
"logit": "00_torch_core.ipynb",
"num_distrib": "00_torch_core.ipynb",
"rank_distrib": "00_torch_core.ipynb",
"distrib_barrier": "00_torch_core.ipynb",
"Path.save_array": "00_torch_core.ipynb",
"Path.load_array": "00_torch_core.ipynb",
"base_doc": "00_torch_core.ipynb",
"doc": "00_torch_core.ipynb",
"nested_reorder": "00_torch_core.ipynb",
"make_cross_image": "00_torch_core.ipynb",
"show_image_batch": "00_torch_core.ipynb",
"requires_grad": "00_torch_core.ipynb",
"init_default": "01_layers.ipynb",
"cond_init": "00_torch_core.ipynb",
"apply_leaf": "00_torch_core.ipynb",
"apply_init": "00_torch_core.ipynb",
"script_use_ctx": "00_torch_core.ipynb",
"script_save_ctx": "00_torch_core.ipynb",
"script_fwd": "00_torch_core.ipynb",
"script_bwd": "00_torch_core.ipynb",
"grad_module": "00_torch_core.ipynb",
"ismin_torch": "00_torch_core.ipynb",
"notmax_torch": "00_torch_core.ipynb",
"module": "01_layers.ipynb",
"Identity": "01_layers.ipynb",
"Lambda": "01_layers.ipynb",
"PartialLambda": "01_layers.ipynb",
"Flatten": "01_layers.ipynb",
"ToTensorBase": "01_layers.ipynb",
"View": "01_layers.ipynb",
"ResizeBatch": "01_layers.ipynb",
"Debugger": "01_layers.ipynb",
"sigmoid_range": "01_layers.ipynb",
"SigmoidRange": "01_layers.ipynb",
"AdaptiveConcatPool1d": "01_layers.ipynb",
"AdaptiveConcatPool2d": "01_layers.ipynb",
"PoolType": "01_layers.ipynb",
"adaptive_pool": "01_layers.ipynb",
"PoolFlatten": "01_layers.ipynb",
"NormType": "01_layers.ipynb",
"BatchNorm": "01_layers.ipynb",
"InstanceNorm": "01_layers.ipynb",
"BatchNorm1dFlat": "01_layers.ipynb",
"LinBnDrop": "01_layers.ipynb",
"sigmoid": "01_layers.ipynb",
"sigmoid_": "01_layers.ipynb",
"vleaky_relu": "01_layers.ipynb",
"init_linear": "01_layers.ipynb",
"defaults.activation": "01_layers.ipynb",
"ConvLayer": "01_layers.ipynb",
"AdaptiveAvgPool": "01_layers.ipynb",
"MaxPool": "01_layers.ipynb",
"AvgPool": "01_layers.ipynb",
"trunc_normal_": "01_layers.ipynb",
"Embedding": "01_layers.ipynb",
"SelfAttention": "01_layers.ipynb",
"PooledSelfAttention2d": "01_layers.ipynb",
"SimpleSelfAttention": "01_layers.ipynb",
"icnr_init": "01_layers.ipynb",
"PixelShuffle_ICNR": "01_layers.ipynb",
"sequential": "01_layers.ipynb",
"SequentialEx": "01_layers.ipynb",
"MergeLayer": "01_layers.ipynb",
"Cat": "01_layers.ipynb",
"SimpleCNN": "01_layers.ipynb",
"ProdLayer": "01_layers.ipynb",
"inplace_relu": "01_layers.ipynb",
"SEModule": "01_layers.ipynb",
"ResBlock": "01_layers.ipynb",
"SEBlock": "01_layers.ipynb",
"SEResNeXtBlock": "01_layers.ipynb",
"SeparableBlock": "01_layers.ipynb",
"TimeDistributed": "01_layers.ipynb",
"swish": "01_layers.ipynb",
"Swish": "01_layers.ipynb",
"MishJitAutoFn": "01_layers.ipynb",
"mish": "01_layers.ipynb",
"Mish": "01_layers.ipynb",
"ParameterModule": "01_layers.ipynb",
"children_and_parameters": "01_layers.ipynb",
"has_children": "01_layers.ipynb",
"flatten_model": "01_layers.ipynb",
"NoneReduce": "01_layers.ipynb",
"in_channels": "01_layers.ipynb",
"BaseLoss": "01a_losses.ipynb",
"CrossEntropyLossFlat": "01a_losses.ipynb",
"FocalLoss": "01a_losses.ipynb",
"FocalLossFlat": "01a_losses.ipynb",
"BCEWithLogitsLossFlat": "01a_losses.ipynb",
"BCELossFlat": "01a_losses.ipynb",
"MSELossFlat": "01a_losses.ipynb",
"L1LossFlat": "01a_losses.ipynb",
"LabelSmoothingCrossEntropy": "01a_losses.ipynb",
"LabelSmoothingCrossEntropyFlat": "01a_losses.ipynb",
"DiceLoss": "01a_losses.ipynb",
"fa_collate": "02_data.load.ipynb",
"fa_convert": "02_data.load.ipynb",
"SkipItemException": "02_data.load.ipynb",
"collate_error": "02_data.load.ipynb",
"DataLoader": "02_data.load.ipynb",
"TfmdDL": "03_data.core.ipynb",
"DataLoaders": "03_data.core.ipynb",
"FilteredBase": "03_data.core.ipynb",
"TfmdLists": "03_data.core.ipynb",
"decode_at": "03_data.core.ipynb",
"show_at": "03_data.core.ipynb",
"Datasets": "03_data.core.ipynb",
"test_set": "03_data.core.ipynb",
"DataLoaders.test_dl": "03_data.core.ipynb",
"fastai_cfg": "04_data.external.ipynb",
"fastai_path": "04_data.external.ipynb",
"URLs": "04_data.external.ipynb",
"untar_data": "04_data.external.ipynb",
"get_files": "05_data.transforms.ipynb",
"FileGetter": "05_data.transforms.ipynb",
"image_extensions": "05_data.transforms.ipynb",
"get_image_files": "05_data.transforms.ipynb",
"ImageGetter": "05_data.transforms.ipynb",
"get_text_files": "05_data.transforms.ipynb",
"ItemGetter": "05_data.transforms.ipynb",
"AttrGetter": "05_data.transforms.ipynb",
"RandomSplitter": "05_data.transforms.ipynb",
"TrainTestSplitter": "05_data.transforms.ipynb",
"IndexSplitter": "05_data.transforms.ipynb",
"EndSplitter": "05_data.transforms.ipynb",
"GrandparentSplitter": "05_data.transforms.ipynb",
"FuncSplitter": "05_data.transforms.ipynb",
"MaskSplitter": "05_data.transforms.ipynb",
"FileSplitter": "05_data.transforms.ipynb",
"ColSplitter": "05_data.transforms.ipynb",
"RandomSubsetSplitter": "05_data.transforms.ipynb",
"parent_label": "05_data.transforms.ipynb",
"RegexLabeller": "05_data.transforms.ipynb",
"ColReader": "05_data.transforms.ipynb",
"CategoryMap": "05_data.transforms.ipynb",
"Categorize": "05_data.transforms.ipynb",
"Category": "05_data.transforms.ipynb",
"MultiCategorize": "05_data.transforms.ipynb",
"MultiCategory": "05_data.transforms.ipynb",
"OneHotEncode": "05_data.transforms.ipynb",
"EncodedMultiCategorize": "05_data.transforms.ipynb",
"RegressionSetup": "05_data.transforms.ipynb",
"get_c": "05_data.transforms.ipynb",
"ToTensor": "05_data.transforms.ipynb",
"IntToFloatTensor": "05_data.transforms.ipynb",
"broadcast_vec": "05_data.transforms.ipynb",
"Normalize": "05_data.transforms.ipynb",
"TransformBlock": "06_data.block.ipynb",
"CategoryBlock": "06_data.block.ipynb",
"MultiCategoryBlock": "06_data.block.ipynb",
"RegressionBlock": "06_data.block.ipynb",
"DataBlock": "06_data.block.ipynb",
"DataBlock.summary": "06_data.block.ipynb",
"imagenet_stats": "07_vision.core.ipynb",
"cifar_stats": "07_vision.core.ipynb",
"mnist_stats": "07_vision.core.ipynb",
"n_px": "07_vision.core.ipynb",
"shape": "60_medical.imaging.ipynb",
"aspect": "07_vision.core.ipynb",
"Image.Image.reshape": "07_vision.core.ipynb",
"Image.Image.to_bytes_format": "07_vision.core.ipynb",
"Image.Image.to_thumb": "07_vision.core.ipynb",
"Image.Image.resize_max": "07_vision.core.ipynb",
"to_image": "07_vision.core.ipynb",
"load_image": "07_vision.core.ipynb",
"image2tensor": "07_vision.core.ipynb",
"PILBase": "07_vision.core.ipynb",
"PILImage": "07_vision.core.ipynb",
"PILImageBW": "07_vision.core.ipynb",
"PILMask": "07_vision.core.ipynb",
"OpenMask": "07_vision.core.ipynb",
"OpenMask.loss_func": "07_vision.core.ipynb",
"PILMask.create": "07_vision.core.ipynb",
"AddMaskCodes": "07_vision.core.ipynb",
"TensorPoint": "07_vision.core.ipynb",
"TensorPointCreate": "07_vision.core.ipynb",
"TensorPointCreate.loss_func": "07_vision.core.ipynb",
"TensorPoint.create": "07_vision.core.ipynb",
"get_annotations": "07_vision.core.ipynb",
"TensorBBox": "07_vision.core.ipynb",
"LabeledBBox": "07_vision.core.ipynb",
"encodes": "40_tabular.core.ipynb",
"PointScaler": "07_vision.core.ipynb",
"BBoxLabeler": "07_vision.core.ipynb",
"decodes": "40_tabular.core.ipynb",
"get_grid": "08_vision.data.ipynb",
"clip_remove_empty": "08_vision.data.ipynb",
"bb_pad": "08_vision.data.ipynb",
"ImageBlock": "08_vision.data.ipynb",
"MaskBlock": "08_vision.data.ipynb",
"PointBlock": "08_vision.data.ipynb",
"BBoxBlock": "08_vision.data.ipynb",
"PointBlock.__doc__": "08_vision.data.ipynb",
"BBoxBlock.__doc__": "08_vision.data.ipynb",
"BBoxLblBlock": "08_vision.data.ipynb",
"ImageDataLoaders": "08_vision.data.ipynb",
"ImageDataLoaders.from_csv": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_func": "08_vision.data.ipynb",
"ImageDataLoaders.from_path_re": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_re": "08_vision.data.ipynb",
"SegmentationDataLoaders": "08_vision.data.ipynb",
"RandTransform": "09_vision.augment.ipynb",
"TensorTypes": "09_vision.augment.ipynb",
"Image.Image.flip_lr": "09_vision.augment.ipynb",
"TensorImageBase.flip_lr": "09_vision.augment.ipynb",
"TensorPoint.flip_lr": "09_vision.augment.ipynb",
"TensorBBox.flip_lr": "09_vision.augment.ipynb",
"FlipItem": "09_vision.augment.ipynb",
"PILImage.dihedral": "09_vision.augment.ipynb",
"TensorImage.dihedral": "09_vision.augment.ipynb",
"TensorPoint.dihedral": "09_vision.augment.ipynb",
"TensorBBox.dihedral": "09_vision.augment.ipynb",
"DihedralItem": "09_vision.augment.ipynb",
"TensorBBox.crop_pad": "09_vision.augment.ipynb",
"TensorPoint.crop_pad": "09_vision.augment.ipynb",
"Image.Image.crop_pad": "09_vision.augment.ipynb",
"CropPad": "09_vision.augment.ipynb",
"RandomCrop": "09_vision.augment.ipynb",
"OldRandomCrop": "09_vision.augment.ipynb",
"Resize": "09_vision.augment.ipynb",
"RandomResizedCrop": "09_vision.augment.ipynb",
"RatioResize": "09_vision.augment.ipynb",
"affine_grid": "09_vision.augment.ipynb",
"TensorImage.affine_coord": "09_vision.augment.ipynb",
"TensorMask.affine_coord": "09_vision.augment.ipynb",
"TensorPoint.affine_coord": "09_vision.augment.ipynb",
"TensorBBox.affine_coord": "09_vision.augment.ipynb",
"AffineCoordTfm": "09_vision.augment.ipynb",
"RandomResizedCropGPU": "09_vision.augment.ipynb",
"mask_tensor": "09_vision.augment.ipynb",
"affine_mat": "09_vision.augment.ipynb",
"flip_mat": "09_vision.augment.ipynb",
"TensorImage.flip_batch": "09_vision.augment.ipynb",
"TensorMask.flip_batch": "09_vision.augment.ipynb",
"TensorPoint.flip_batch": "09_vision.augment.ipynb",
"TensorBBox.flip_batch": "09_vision.augment.ipynb",
"Flip": "09_vision.augment.ipynb",
"DeterministicDraw": "09_vision.augment.ipynb",
"DeterministicFlip": "09_vision.augment.ipynb",
"dihedral_mat": "09_vision.augment.ipynb",
"TensorImage.dihedral_batch": "09_vision.augment.ipynb",
"TensorMask.dihedral_batch": "09_vision.augment.ipynb",
"TensorPoint.dihedral_batch": "09_vision.augment.ipynb",
"TensorBBox.dihedral_batch": "09_vision.augment.ipynb",
"Dihedral": "09_vision.augment.ipynb",
"DeterministicDihedral": "09_vision.augment.ipynb",
"rotate_mat": "09_vision.augment.ipynb",
"TensorImage.rotate": "09_vision.augment.ipynb",
"TensorMask.rotate": "09_vision.augment.ipynb",
"TensorPoint.rotate": "09_vision.augment.ipynb",
"TensorBBox.rotate": "09_vision.augment.ipynb",
"Rotate": "09_vision.augment.ipynb",
"zoom_mat": "09_vision.augment.ipynb",
"TensorImage.zoom": "09_vision.augment.ipynb",
"TensorMask.zoom": "09_vision.augment.ipynb",
"TensorPoint.zoom": "09_vision.augment.ipynb",
"TensorBBox.zoom": "09_vision.augment.ipynb",
"Zoom": "09_vision.augment.ipynb",
"find_coeffs": "09_vision.augment.ipynb",
"apply_perspective": "09_vision.augment.ipynb",
"TensorImage.warp": "09_vision.augment.ipynb",
"TensorMask.warp": "09_vision.augment.ipynb",
"TensorPoint.warp": "09_vision.augment.ipynb",
"TensorBBox.warp": "09_vision.augment.ipynb",
"Warp": "09_vision.augment.ipynb",
"TensorImage.lighting": "09_vision.augment.ipynb",
"SpaceTfm": "09_vision.augment.ipynb",
"LightingTfm": "09_vision.augment.ipynb",
"TensorImage.brightness": "09_vision.augment.ipynb",
"Brightness": "09_vision.augment.ipynb",
"TensorImage.contrast": "09_vision.augment.ipynb",
"Contrast": "09_vision.augment.ipynb",
"grayscale": "09_vision.augment.ipynb",
"TensorImage.saturation": "09_vision.augment.ipynb",
"Saturation": "09_vision.augment.ipynb",
"rgb2hsv": "09_vision.augment.ipynb",
"hsv2rgb": "09_vision.augment.ipynb",
"TensorImage.hsv": "09_vision.augment.ipynb",
"HSVTfm": "09_vision.augment.ipynb",
"TensorImage.hue": "09_vision.augment.ipynb",
"Hue": "09_vision.augment.ipynb",
"cutout_gaussian": "09_vision.augment.ipynb",
"norm_apply_denorm": "09_vision.augment.ipynb",
"RandomErasing": "09_vision.augment.ipynb",
"setup_aug_tfms": "09_vision.augment.ipynb",
"aug_transforms": "09_vision.augment.ipynb",
"download_images": "09b_vision.utils.ipynb",
"resize_to": "09b_vision.utils.ipynb",
"verify_image": "09b_vision.utils.ipynb",
"verify_images": "09b_vision.utils.ipynb",
"resize_image": "09b_vision.utils.ipynb",
"resize_images": "09b_vision.utils.ipynb",
"Box.__getitem__": "09c_vision.widgets.ipynb",
"widget": "09c_vision.widgets.ipynb",
"carousel": "09c_vision.widgets.ipynb",
"ImagesCleaner": "09c_vision.widgets.ipynb",
"ImageClassifierCleaner": "09c_vision.widgets.ipynb",
"init_cnn": "11_vision.models.xresnet.ipynb",
"XResNet": "11_vision.models.xresnet.ipynb",
"xresnet18": "11_vision.models.xresnet.ipynb",
"xresnet34": "11_vision.models.xresnet.ipynb",
"xresnet50": "11_vision.models.xresnet.ipynb",
"xresnet101": "11_vision.models.xresnet.ipynb",
"xresnet152": "11_vision.models.xresnet.ipynb",
"xresnet18_deep": "11_vision.models.xresnet.ipynb",
"xresnet34_deep": "11_vision.models.xresnet.ipynb",
"xresnet50_deep": "11_vision.models.xresnet.ipynb",
"xresnet18_deeper": "11_vision.models.xresnet.ipynb",
"xresnet34_deeper": "11_vision.models.xresnet.ipynb",
"xresnet50_deeper": "11_vision.models.xresnet.ipynb",
"se_kwargs1": "11_vision.models.xresnet.ipynb",
"se_kwargs2": "11_vision.models.xresnet.ipynb",
"se_kwargs3": "11_vision.models.xresnet.ipynb",
"g0": "11_vision.models.xresnet.ipynb",
"g1": "11_vision.models.xresnet.ipynb",
"g2": "11_vision.models.xresnet.ipynb",
"g3": "11_vision.models.xresnet.ipynb",
"xse_resnet18": "11_vision.models.xresnet.ipynb",
"xse_resnext18": "11_vision.models.xresnet.ipynb",
"xresnext18": "11_vision.models.xresnet.ipynb",
"xse_resnet34": "11_vision.models.xresnet.ipynb",
"xse_resnext34": "11_vision.models.xresnet.ipynb",
"xresnext34": "11_vision.models.xresnet.ipynb",
"xse_resnet50": "11_vision.models.xresnet.ipynb",
"xse_resnext50": "11_vision.models.xresnet.ipynb",
"xresnext50": "11_vision.models.xresnet.ipynb",
"xse_resnet101": "11_vision.models.xresnet.ipynb",
"xse_resnext101": "11_vision.models.xresnet.ipynb",
"xresnext101": "11_vision.models.xresnet.ipynb",
"xse_resnet152": "11_vision.models.xresnet.ipynb",
"xsenet154": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deeper": "11_vision.models.xresnet.ipynb",
"Optimizer": "12_optimizer.ipynb",
"sgd_step": "12_optimizer.ipynb",
"weight_decay": "12_optimizer.ipynb",
"weight_decay.defaults": "12_optimizer.ipynb",
"l2_reg": "12_optimizer.ipynb",
"l2_reg.defaults": "12_optimizer.ipynb",
"average_grad": "12_optimizer.ipynb",
"average_grad.defaults": "12_optimizer.ipynb",
"average_sqr_grad": "12_optimizer.ipynb",
"average_sqr_grad.defaults": "12_optimizer.ipynb",
"momentum_step": "12_optimizer.ipynb",
"SGD": "12_optimizer.ipynb",
"rms_prop_step": "12_optimizer.ipynb",
"rms_prop_step.defaults": "12_optimizer.ipynb",
"RMSProp": "12_optimizer.ipynb",
"step_stat": "12_optimizer.ipynb",
"debias": "12_optimizer.ipynb",
"adam_step": "12_optimizer.ipynb",
"Adam": "12_optimizer.ipynb",
"radam_step": "12_optimizer.ipynb",
"RAdam": "12_optimizer.ipynb",
"qhadam_step": "12_optimizer.ipynb",
"QHAdam": "12_optimizer.ipynb",
"larc_layer_lr": "12_optimizer.ipynb",
"larc_layer_lr.defaults": "12_optimizer.ipynb",
"larc_step": "12_optimizer.ipynb",
"Larc": "12_optimizer.ipynb",
"lamb_step": "12_optimizer.ipynb",
"Lamb": "12_optimizer.ipynb",
"Lookahead": "12_optimizer.ipynb",
"ranger": "12_optimizer.ipynb",
"detuplify_pg": "12_optimizer.ipynb",
"set_item_pg": "12_optimizer.ipynb",
"pytorch_hp_map": "12_optimizer.ipynb",
"OptimWrapper": "12_optimizer.ipynb",
"Callback": "13_callback.core.ipynb",
"TrainEvalCallback": "13_callback.core.ipynb",
"GatherPredsCallback": "13_callback.core.ipynb",
"FetchPredsCallback": "13_callback.core.ipynb",
"defaults.lr": "13a_learner.ipynb",
"replacing_yield": "13a_learner.ipynb",
"mk_metric": "13a_learner.ipynb",
"save_model": "13a_learner.ipynb",
"load_model": "13a_learner.ipynb",
"SkipToEpoch": "13a_learner.ipynb",
"Learner": "13a_learner.ipynb",
"before_batch_cb": "13a_learner.ipynb",
"Learner.save": "13a_learner.ipynb",
"Learner.load": "13a_learner.ipynb",
"Learner.export": "13a_learner.ipynb",
"load_learner": "13a_learner.ipynb",
"Metric": "13a_learner.ipynb",
"AvgMetric": "13a_learner.ipynb",
"AvgLoss": "13a_learner.ipynb",
"AvgSmoothLoss": "13a_learner.ipynb",
"ValueMetric": "13a_learner.ipynb",
"Recorder": "13a_learner.ipynb",
"CastToTensor": "13a_learner.ipynb",
"Learner.freeze_to": "13a_learner.ipynb",
"Learner.freeze": "13a_learner.ipynb",
"Learner.unfreeze": "13a_learner.ipynb",
"Learner.tta": "13a_learner.ipynb",
"flatten_check": "13b_metrics.ipynb",
"AccumMetric": "13b_metrics.ipynb",
"skm_to_fastai": "13b_metrics.ipynb",
"optim_metric": "13b_metrics.ipynb",
"accuracy": "13b_metrics.ipynb",
"error_rate": "13b_metrics.ipynb",
"top_k_accuracy": "13b_metrics.ipynb",
"APScoreBinary": "13b_metrics.ipynb",
"BalancedAccuracy": "13b_metrics.ipynb",
"BrierScore": "13b_metrics.ipynb",
"CohenKappa": "13b_metrics.ipynb",
"F1Score": "13b_metrics.ipynb",
"FBeta": "13b_metrics.ipynb",
"HammingLoss": "13b_metrics.ipynb",
"Jaccard": "13b_metrics.ipynb",
"Precision": "13b_metrics.ipynb",
"Recall": "13b_metrics.ipynb",
"RocAuc": "13b_metrics.ipynb",
"RocAucBinary": "13b_metrics.ipynb",
"MatthewsCorrCoef": "13b_metrics.ipynb",
"accuracy_multi": "13b_metrics.ipynb",
"APScoreMulti": "13b_metrics.ipynb",
"BrierScoreMulti": "13b_metrics.ipynb",
"F1ScoreMulti": "13b_metrics.ipynb",
"FBetaMulti": "13b_metrics.ipynb",
"HammingLossMulti": "13b_metrics.ipynb",
"JaccardMulti": "13b_metrics.ipynb",
"MatthewsCorrCoefMulti": "13b_metrics.ipynb",
"PrecisionMulti": "13b_metrics.ipynb",
"RecallMulti": "13b_metrics.ipynb",
"RocAucMulti": "13b_metrics.ipynb",
"mse": "13b_metrics.ipynb",
"rmse": "13b_metrics.ipynb",
"rmse.__doc__": "13b_metrics.ipynb",
"mae": "13b_metrics.ipynb",
"msle": "13b_metrics.ipynb",
"exp_rmspe": "13b_metrics.ipynb",
"exp_rmspe.__doc__": "13b_metrics.ipynb",
"ExplainedVariance": "13b_metrics.ipynb",
"R2Score": "13b_metrics.ipynb",
"PearsonCorrCoef": "13b_metrics.ipynb",
"SpearmanCorrCoef": "13b_metrics.ipynb",
"foreground_acc": "13b_metrics.ipynb",
"Dice": "13b_metrics.ipynb",
"DiceMulti": "13b_metrics.ipynb",
"JaccardCoeff": "13b_metrics.ipynb",
"CorpusBLEUMetric": "13b_metrics.ipynb",
"Perplexity": "13b_metrics.ipynb",
"perplexity": "13b_metrics.ipynb",
"LossMetric": "13b_metrics.ipynb",
"LossMetrics": "13b_metrics.ipynb",
"annealer": "14_callback.schedule.ipynb",
"sched_lin": "14_callback.schedule.ipynb",
"sched_cos": "14_callback.schedule.ipynb",
"sched_no": "14_callback.schedule.ipynb",
"sched_exp": "14_callback.schedule.ipynb",
"SchedLin": "14_callback.schedule.ipynb",
"SchedCos": "14_callback.schedule.ipynb",
"SchedNo": "14_callback.schedule.ipynb",
"SchedExp": "14_callback.schedule.ipynb",
"SchedLin.__doc__": "14_callback.schedule.ipynb",
"SchedCos.__doc__": "14_callback.schedule.ipynb",
"SchedExp.__doc__": "14_callback.schedule.ipynb",
"SchedPoly": "14_callback.schedule.ipynb",
"combine_scheds": "14_callback.schedule.ipynb",
"combined_cos": "14_callback.schedule.ipynb",
"ParamScheduler": "14_callback.schedule.ipynb",
"Learner.fit_one_cycle": "14_callback.schedule.ipynb",
"Recorder.plot_sched": "14_callback.schedule.ipynb",
"Learner.fit_flat_cos": "14_callback.schedule.ipynb",
"Learner.fit_sgdr": "14_callback.schedule.ipynb",
"Learner.fine_tune": "14_callback.schedule.ipynb",
"LRFinder": "14_callback.schedule.ipynb",
"valley": "14_callback.schedule.ipynb",
"slide": "14_callback.schedule.ipynb",
"minimum": "14_callback.schedule.ipynb",
"steep": "14_callback.schedule.ipynb",
"Recorder.plot_lr_find": "14_callback.schedule.ipynb",
"Learner.lr_find": "14_callback.schedule.ipynb",
"CollectDataCallback": "14a_callback.data.ipynb",
"WeightedDL": "14a_callback.data.ipynb",
"Datasets.weighted_dataloaders": "14a_callback.data.ipynb",
"DataBlock.weighted_dataloaders": "14a_callback.data.ipynb",
"PartialDL": "14a_callback.data.ipynb",
"FilteredBase.partial_dataloaders": "14a_callback.data.ipynb",
"Hook": "15_callback.hook.ipynb",
"hook_output": "15_callback.hook.ipynb",
"Hooks": "15_callback.hook.ipynb",
"hook_outputs": "15_callback.hook.ipynb",
"dummy_eval": "15_callback.hook.ipynb",
"model_sizes": "15_callback.hook.ipynb",
"num_features_model": "15_callback.hook.ipynb",
"has_params": "15_callback.hook.ipynb",
"HookCallback": "15_callback.hook.ipynb",
"total_params": "15_callback.hook.ipynb",
"layer_info": "15_callback.hook.ipynb",
"module_summary": "15_callback.hook.ipynb",
"Learner.summary": "15_callback.hook.ipynb",
"ActivationStats": "15_callback.hook.ipynb",
"UnetBlock": "15a_vision.models.unet.ipynb",
"ResizeToOrig": "15a_vision.models.unet.ipynb",
"DynamicUnet": "15a_vision.models.unet.ipynb",
"ProgressCallback": "16_callback.progress.ipynb",
"Learner.no_bar": "16_callback.progress.ipynb",
"ShowGraphCallback": "16_callback.progress.ipynb",
"CSVLogger": "16_callback.progress.ipynb",
"TerminateOnNaNCallback": "17_callback.tracker.ipynb",
"TrackerCallback": "17_callback.tracker.ipynb",
"EarlyStoppingCallback": "17_callback.tracker.ipynb",
"SaveModelCallback": "17_callback.tracker.ipynb",
"ReduceLROnPlateau": "17_callback.tracker.ipynb",
"MixedPrecision": "18_callback.fp16.ipynb",
"FP16TestCallback": "18_callback.fp16.ipynb",
"Learner.to_fp16": "18_callback.fp16.ipynb",
"Learner.to_fp32": "18_callback.fp16.ipynb",
"get_master": "18_callback.fp16.ipynb",
"to_master_grads": "18_callback.fp16.ipynb",
"to_model_params": "18_callback.fp16.ipynb",
"test_overflow": "18_callback.fp16.ipynb",
"grad_overflow": "18_callback.fp16.ipynb",
"copy_clone": "18_callback.fp16.ipynb",
"ModelToHalf": "18_callback.fp16.ipynb",
"NonNativeMixedPrecision": "18_callback.fp16.ipynb",
"Learner.to_non_native_fp16": "18_callback.fp16.ipynb",
"Learner.to_non_native_fp32": "18_callback.fp16.ipynb",
"ShortEpochCallback": "18a_callback.training.ipynb",
"GradientAccumulation": "18a_callback.training.ipynb",
"GradientClip": "18a_callback.training.ipynb",
"set_bn_eval": "18a_callback.training.ipynb",
"BnFreeze": "18a_callback.training.ipynb",
"bn_types": "18a_callback.training.ipynb",
"ChannelsLast": "18a_callback.training.ipynb",
"MCDropoutCallback": "18b_callback.preds.ipynb",
"reduce_loss": "19_callback.mixup.ipynb",
"MixHandler": "19_callback.mixup.ipynb",
"MixUp": "19_callback.mixup.ipynb",
"CutMix": "19_callback.mixup.ipynb",
"Interpretation": "20_interpret.ipynb",
"ClassificationInterpretation": "20_interpret.ipynb",
"SegmentationInterpretation": "20_interpret.ipynb",
"DataParallel.reset": "20a_distributed.ipynb",
"ParallelTrainer": "20a_distributed.ipynb",
"Learner.to_parallel": "20a_distributed.ipynb",
"Learner.detach_parallel": "20a_distributed.ipynb",
"Learner.parallel_ctx": "20a_distributed.ipynb",
"DistributedDataParallel.reset": "20a_distributed.ipynb",
"setup_distrib": "20a_distributed.ipynb",
"teardown_distrib": "20a_distributed.ipynb",
"DistributedDL": "20a_distributed.ipynb",
"DistributedTrainer": "20a_distributed.ipynb",
"Learner.to_distributed": "20a_distributed.ipynb",
"Learner.detach_distributed": "20a_distributed.ipynb",
"Learner.distrib_ctx": "20a_distributed.ipynb",
"rank0_first": "20a_distributed.ipynb",
"has_pool_type": "21_vision.learner.ipynb",
"cut_model": "21_vision.learner.ipynb",
"create_body": "21_vision.learner.ipynb",
"create_head": "21_vision.learner.ipynb",
"default_split": "21_vision.learner.ipynb",
"model_meta": "21_vision.learner.ipynb",
"add_head": "21_vision.learner.ipynb",
"create_vision_model": "21_vision.learner.ipynb",
"TimmBody": "21_vision.learner.ipynb",
"create_timm_model": "21_vision.learner.ipynb",
"vision_learner": "21_vision.learner.ipynb",
"create_unet_model": "21_vision.learner.ipynb",
"unet_learner": "21_vision.learner.ipynb",
"create_cnn_model": "21_vision.learner.ipynb",
"cnn_learner": "21_vision.learner.ipynb",
"GANModule": "24_vision.gan.ipynb",
"basic_critic": "24_vision.gan.ipynb",
"AddChannels": "24_vision.gan.ipynb",
"basic_generator": "24_vision.gan.ipynb",
"DenseResBlock": "24_vision.gan.ipynb",
"gan_critic": "24_vision.gan.ipynb",
"GANLoss": "24_vision.gan.ipynb",
"AdaptiveLoss": "24_vision.gan.ipynb",
"accuracy_thresh_expand": "24_vision.gan.ipynb",
"set_freeze_model": "24_vision.gan.ipynb",
"GANTrainer": "24_vision.gan.ipynb",
"FixedGANSwitcher": "24_vision.gan.ipynb",
"AdaptiveGANSwitcher": "24_vision.gan.ipynb",
"GANDiscriminativeLR": "24_vision.gan.ipynb",
"InvisibleTensor": "24_vision.gan.ipynb",
"generate_noise": "24_vision.gan.ipynb",
"gan_loss_from_func": "24_vision.gan.ipynb",
"GANLearner": "24_vision.gan.ipynb",
"GANLearner.from_learners": "24_vision.gan.ipynb",
"GANLearner.wgan": "24_vision.gan.ipynb",
"spec_add_spaces": "30_text.core.ipynb",
"rm_useless_spaces": "30_text.core.ipynb",
"replace_rep": "30_text.core.ipynb",
"replace_wrep": "30_text.core.ipynb",
"fix_html": "30_text.core.ipynb",
"replace_all_caps": "30_text.core.ipynb",
"replace_maj": "30_text.core.ipynb",
"lowercase": "30_text.core.ipynb",
"replace_space": "30_text.core.ipynb",
"defaults.text_spec_tok": "30_text.core.ipynb",
"defaults.text_proc_rules": "30_text.core.ipynb",
"defaults.text_postproc_rules": "30_text.core.ipynb",
"BaseTokenizer": "30_text.core.ipynb",
"SpacyTokenizer": "30_text.core.ipynb",
"WordTokenizer": "30_text.core.ipynb",
"TokenizeWithRules": "30_text.core.ipynb",
"tokenize1": "30_text.core.ipynb",
"parallel_tokenize": "30_text.core.ipynb",
"fn_counter_pkl": "30_text.core.ipynb",
"fn_lengths_pkl": "30_text.core.ipynb",
"tokenize_folder": "30_text.core.ipynb",
"tokenize_files": "30_text.core.ipynb",
"tokenize_texts": "30_text.core.ipynb",
"tokenize_df": "30_text.core.ipynb",
"tokenize_csv": "30_text.core.ipynb",
"load_tokenized_csv": "30_text.core.ipynb",
"Tokenizer": "30_text.core.ipynb",
"eu_langs": "30_text.core.ipynb",
"SentencePieceTokenizer": "30_text.core.ipynb",
"SubwordTokenizer": "30_text.core.ipynb",
"reverse_text": "31_text.data.ipynb",
"make_vocab": "31_text.data.ipynb",
"TensorText": "31_text.data.ipynb",
"LMTensorText": "31_text.data.ipynb",
"TensorText.__doc__": "31_text.data.ipynb",
"LMTensorText.__doc__": "31_text.data.ipynb",
"Numericalize": "31_text.data.ipynb",
"LMDataLoader": "31_text.data.ipynb",
"Pad_Input": "31_text.data.ipynb",
"pad_input": "31_text.data.ipynb",
"pad_chunk": "31_text.data.ipynb",
"pad_input_chunk": "31_text.data.ipynb",
"Pad_Chunk": "31_text.data.ipynb",
"SortedDL": "31_text.data.ipynb",
"TextBlock": "31_text.data.ipynb",
"TextDataLoaders": "31_text.data.ipynb",
"TextDataLoaders.from_csv": "31_text.data.ipynb",
"dropout_mask": "32_text.models.awdlstm.ipynb",
"RNNDropout": "32_text.models.awdlstm.ipynb",
"WeightDropout": "32_text.models.awdlstm.ipynb",
"EmbeddingDropout": "32_text.models.awdlstm.ipynb",
"AWD_LSTM": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_config": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_config": "32_text.models.awdlstm.ipynb",
"LinearDecoder": "33_text.models.core.ipynb",
"SequentialRNN": "33_text.models.core.ipynb",
"get_language_model": "33_text.models.core.ipynb",
"SentenceEncoder": "33_text.models.core.ipynb",
"masked_concat_pool": "33_text.models.core.ipynb",
"PoolingLinearClassifier": "33_text.models.core.ipynb",
"get_text_classifier": "33_text.models.core.ipynb",
"ModelResetter": "34_callback.rnn.ipynb",
"RNNCallback": "34_callback.rnn.ipynb",
"RNNRegularizer": "34_callback.rnn.ipynb",
"rnn_cbs": "34_callback.rnn.ipynb",
"match_embeds": "37_text.learner.ipynb",
"load_ignore_keys": "37_text.learner.ipynb",
"clean_raw_keys": "37_text.learner.ipynb",
"load_model_text": "37_text.learner.ipynb",
"TextLearner": "37_text.learner.ipynb",
"decode_spec_tokens": "37_text.learner.ipynb",
"LMLearner": "37_text.learner.ipynb",
"language_model_learner": "37_text.learner.ipynb",
"text_classifier_learner": "37_text.learner.ipynb",
"make_date": "40_tabular.core.ipynb",
"add_datepart": "40_tabular.core.ipynb",
"add_elapsed_times": "40_tabular.core.ipynb",
"cont_cat_split": "40_tabular.core.ipynb",
"df_shrink_dtypes": "40_tabular.core.ipynb",
"df_shrink": "40_tabular.core.ipynb",
"Tabular": "40_tabular.core.ipynb",
"TabularPandas": "40_tabular.core.ipynb",
"TabularProc": "40_tabular.core.ipynb",
"Categorify": "40_tabular.core.ipynb",
"setups": "40_tabular.core.ipynb",
"FillStrategy": "40_tabular.core.ipynb",
"FillMissing": "40_tabular.core.ipynb",
"ReadTabBatch": "40_tabular.core.ipynb",
"TabDataLoader": "40_tabular.core.ipynb",
"TabularDataLoaders": "41_tabular.data.ipynb",
"TabularDataLoaders.from_csv": "41_tabular.data.ipynb",
"emb_sz_rule": "42_tabular.model.ipynb",
"get_emb_sz": "42_tabular.model.ipynb",
"TabularModel": "42_tabular.model.ipynb",
"tabular_config": "42_tabular.model.ipynb",
"TabularLearner": "43_tabular.learner.ipynb",
"tabular_learner": "43_tabular.learner.ipynb",
"TabularCollab": "45_collab.ipynb",
"CollabDataLoaders": "45_collab.ipynb",
"CollabDataLoaders.from_csv": "45_collab.ipynb",
"EmbeddingDotBias": "45_collab.ipynb",
"EmbeddingNN": "45_collab.ipynb",
"collab_learner": "45_collab.ipynb",
"get_dicom_files": "60_medical.imaging.ipynb",
"Path.dcmread": "60_medical.imaging.ipynb",
"TensorDicom": "60_medical.imaging.ipynb",
"PILDicom": "60_medical.imaging.ipynb",
"Path.png16read": "60_medical.imaging.ipynb",
"pixels": "60_medical.imaging.ipynb",
"scaled_px": "60_medical.imaging.ipynb",
"array_freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.hist_scaled_pt": "60_medical.imaging.ipynb",
"Tensor.hist_scaled": "60_medical.imaging.ipynb",
"DcmDataset.hist_scaled": "60_medical.imaging.ipynb",
"Tensor.windowed": "60_medical.imaging.ipynb",
"DcmDataset.windowed": "60_medical.imaging.ipynb",
"dicom_windows": "60_medical.imaging.ipynb",
"TensorCTScan": "60_medical.imaging.ipynb",
"PILCTScan": "60_medical.imaging.ipynb",
"DcmDataset.show": "60_medical.imaging.ipynb",
"DcmDataset.pct_in_window": "60_medical.imaging.ipynb",
"uniform_blur2d": "60_medical.imaging.ipynb",
"gauss_blur2d": "60_medical.imaging.ipynb",
"Tensor.mask_from_blur": "60_medical.imaging.ipynb",
"DcmDataset.mask_from_blur": "60_medical.imaging.ipynb",
"mask2bbox": "60_medical.imaging.ipynb",
"crop_resize": "60_medical.imaging.ipynb",
"Tensor.to_nchan": "60_medical.imaging.ipynb",
"DcmDataset.to_nchan": "60_medical.imaging.ipynb",
"Tensor.to_3chan": "60_medical.imaging.ipynb",
"DcmDataset.to_3chan": "60_medical.imaging.ipynb",
"Tensor.save_jpg": "60_medical.imaging.ipynb",
"DcmDataset.save_jpg": "60_medical.imaging.ipynb",
"Tensor.to_uint16": "60_medical.imaging.ipynb",
"DcmDataset.to_uint16": "60_medical.imaging.ipynb",
"Tensor.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.set_pixels": "60_medical.imaging.ipynb",
"DcmDataset.pixel_array": "60_medical.imaging.ipynb",
"DcmDataset.zoom": "60_medical.imaging.ipynb",
"DcmDataset.zoom_to": "60_medical.imaging.ipynb",
"DcmDataset.as_dict": "60_medical.imaging.ipynb",
"pd.DataFrame.from_dicoms": "60_medical.imaging.ipynb",
"DicomSegmentationDataLoaders": "60_medical.imaging.ipynb",
"WandbCallback": "70_callback.wandb.ipynb",
"Learner.gather_args": "70_callback.wandb.ipynb",
"log_dataset": "70_callback.wandb.ipynb",
"log_model": "70_callback.wandb.ipynb",
"TensorBoardBaseCallback": "70a_callback.tensorboard.ipynb",
"TensorBoardCallback": "70a_callback.tensorboard.ipynb",
"TensorBoardProjectorCallback": "70a_callback.tensorboard.ipynb",
"projector_word_embeddings": "70a_callback.tensorboard.ipynb",
"NeptuneCallback": "70b_callback.neptune.ipynb",
"json_clean": "70c_callback.captum.ipynb",
"jsonutil.json_clean": "70c_callback.captum.ipynb",
"CaptumInterpretation": "70c_callback.captum.ipynb",
"CaptumInterpretation.insights": "70c_callback.captum.ipynb",
"CometCallback": "70d_callback.comet.ipynb",
"synth_dbunch": "97_test_utils.ipynb",
"RegModel": "97_test_utils.ipynb",
"synth_learner": "97_test_utils.ipynb",
"VerboseCallback": "97_test_utils.ipynb",
"get_env": "97_test_utils.ipynb",
"try_import": "97_test_utils.ipynb",
"nvidia_smi": "97_test_utils.ipynb",
"nvidia_mem": "97_test_utils.ipynb",
"show_install": "97_test_utils.ipynb",
"PYTORCH_URL": "99_pytorch_doc.ipynb",
"pytorch_doc_link": "99_pytorch_doc.ipynb"}
# Relative paths (under the fastai package root) of the generated modules that
# the notebook index above maps names into; presumably consumed by nbdev's
# doc-link machinery. Order appears to mirror the notebook numbering -- this
# file looks autogenerated, so edit the notebooks rather than this list.
modules = ["torch_core.py",
           "layers.py",
           "losses.py",
           "data/load.py",
           "data/core.py",
           "data/external.py",
           "data/transforms.py",
           "data/block.py",
           "vision/core.py",
           "vision/data.py",
           "vision/augment.py",
           "vision/utils.py",
           "vision/widgets.py",
           "vision/models/xresnet.py",
           "optimizer.py",
           "callback/core.py",
           "learner.py",
           "metrics.py",
           "callback/schedule.py",
           "callback/data.py",
           "callback/hook.py",
           "vision/models/unet.py",
           "callback/progress.py",
           "callback/tracker.py",
           "callback/fp16.py",
           "callback/training.py",
           "callback/preds.py",
           "callback/mixup.py",
           "interpret.py",
           "distributed.py",
           "vision/learner.py",
           "vision/gan.py",
           "text/core.py",
           "text/data.py",
           "text/models/awdlstm.py",
           "text/models/core.py",
           "callback/rnn.py",
           "text/learner.py",
           "tabular/core.py",
           "tabular/data.py",
           "tabular/model.py",
           "tabular/learner.py",
           "collab.py",
           "medical/imaging.py",
           "medical/text.py",
           "callback/wandb.py",
           "callback/tensorboard.py",
           "callback/neptune.py",
           "callback/captum.py",
           "callback/comet.py",
           "test_utils.py",
           "_pytorch_doc.py"]
# Base URL of the rendered documentation site.
doc_url = "https://docs.fast.ai/"
# Base URL used to link documentation pages back to the source tree on GitHub.
git_url = "https://github.com/fastai/fastai/tree/master/"
def custom_doc_links(name):
    "Resolve `name` to a documentation URL in an external library, when one exists."
    # Imported lazily so merely importing this module does not require nbdev.
    from nbdev.showdoc import try_external_doc_link
    external_libs = ['fastcore', 'nbdev']
    return try_external_doc_link(name, external_libs)
| 44,456 | 48.396667 | 74 | py |
fastai | fastai-master/fastai/fp16_utils.py | #Code directly taken from NVIDIA apex: https://github.com/NVIDIA/apex
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
    """Tiny utility module that casts its input to half precision.

    Equivalent to::

        def forward(self, input):
            return input.half()
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # Cast activations to fp16 on the way through.
        return input.half()
def BN_convert_float(module):
    """
    Cast every affine batch-norm layer in `module`'s tree back to fp32.

    Utility function for network_to_half(); retained for legacy purposes.
    Returns `module` (modified in place).
    """
    is_affine_bn = (isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
                    and module.affine is True)
    if is_affine_bn:
        module.float()
    # Recurse into the immediate children, exactly as the original did.
    for child in module.children():
        BN_convert_float(child)
    return module
def network_to_half(network):
    """
    Convert `network` to half precision while keeping batch norms in fp32.

    Retained for legacy purposes. It is recommended to use FP16Model.
    """
    half_body = BN_convert_float(network.half())
    # Prepend an input cast so callers can keep feeding fp32 tensors.
    return nn.Sequential(tofp16(), half_body)
def convert_module(module, dtype):
    """
    Convert `module`'s immediate (non-recursive) parameters and buffers to `dtype`.

    Only floating-point tensors are converted; integer buffers are left alone.
    Gradients, when present, are converted alongside their parameters.
    """
    for param in module.parameters(recurse=False):
        if param is None:
            continue
        if param.data.dtype.is_floating_point:
            param.data = param.data.to(dtype=dtype)
        grad = param._grad
        if grad is not None and grad.data.dtype.is_floating_point:
            grad.data = grad.data.to(dtype=dtype)

    for buf in module.buffers(recurse=False):
        if buf is not None and buf.data.dtype.is_floating_point:
            buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
    """
    Convert all of `network`'s parameters and buffers to `dtype`, except
    affine batch-norm layers, which are skipped (left in their current precision).
    """
    for module in network.modules():
        # Affine batch norms are excluded from the conversion.
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
            continue
        convert_module(module, dtype)
        # Re-flatten RNN weights after the dtype change (see RNNBase.flatten_parameters).
        if isinstance(module, (torch.nn.RNNBase, torch.nn.modules.rnn.RNNBase)):
            module.flatten_parameters()
    return network
class FP16Model(nn.Module):
    """
    Wrap `network` so it runs in half precision (batchnorm-safe): the wrapped
    network is converted via `convert_network` and inputs are cast to fp16.
    """

    def __init__(self, network):
        super().__init__()
        self.network = convert_network(network, dtype=torch.half)

    def forward(self, *inputs):
        half_inputs = tuple(t.half() for t in inputs)
        return self.network(*half_inputs)
def backwards_debug_hook(grad):
    "Debug hook for master params: they should never receive a gradient directly."
    # Fixed typo in the error message ("recieved" -> "received").
    raise RuntimeError("master_params received a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.

    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
    Example::
        model_params, master_params = prep_param_lists(model)
    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    # Only trainable parameters get master copies.
    model_params = [param for param in model.parameters() if param.requires_grad]

    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
            # Also fixed the "F16_Optimizer" typo in the message.
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # master_params.register_hook(backwards_debug_hook)
        if master_params.grad is None:
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        # One detached fp32 clone per model parameter.
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy model gradients to master gradients.

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
    """
    if flat_master:
        # The flattening may incur one more deep copy than is necessary.
        flat = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat)
        return

    for model, master in zip(model_params, master_params):
        if model.grad is None:
            # Mirror the model's missing gradient on the master side.
            master.grad = None
            continue
        if master.grad is None:
            # Lazily allocate a master gradient of matching shape/dtype/device.
            master.grad = Variable(master.data.new(*master.data.size()))
        master.grad.data.copy_(model.grad.data)
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy master parameters to model parameters (in place).

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
    """
    if flat_master:
        # Unflatten the single master tensor back into per-parameter views.
        views = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model, master in zip(model_params, views):
            model.data.copy_(master)
    else:
        for model, master in zip(model_params, master_params):
            model.data.copy_(master.data)
# Backward compatibility fixes
def to_python_float(t):
    "Extract a plain Python number from `t`: via `.item()` when available, else `t[0]`."
    if hasattr(t, 'item'):
        return t.item()
    return t[0]
| 6,957 | 37.230769 | 337 | py |
fastai | fastai-master/fastai/optimizer.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/12_optimizer.ipynb.
# %% ../nbs/12_optimizer.ipynb 2
from __future__ import annotations
from .torch_basics import *
# %% auto 0
# Public API of this module; kept in sync with the exporting notebook
# (the file header says it is autogenerated from 12_optimizer.ipynb).
__all__ = ['pytorch_hp_map', 'Optimizer', 'sgd_step', 'weight_decay', 'l2_reg', 'average_grad', 'average_sqr_grad',
           'momentum_step', 'SGD', 'rms_prop_step', 'RMSProp', 'step_stat', 'debias', 'adam_step', 'Adam', 'radam_step',
           'RAdam', 'qhadam_step', 'QHAdam', 'larc_layer_lr', 'larc_step', 'Larc', 'lamb_step', 'Lamb', 'Lookahead',
           'ranger', 'detuplify_pg', 'set_item_pg', 'OptimWrapper']
# %% ../nbs/12_optimizer.ipynb 6
class _BaseOptimizer():
    "Common functionality between `Optimizer` and `OptimWrapper`"
    def all_params(self,
        n:slice|int=slice(None), # Extended slicing over the optimizer `param_lists`
        with_grad:bool=False # Get all param tuples. If `True` select only those with a gradient
    ):
        # One (param, param_group, state, hypers) tuple per parameter in the selected groups.
        res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg)
        return L(o for o in res if hasattr(o[0], 'grad') and o[0].grad is not None) if with_grad else res

    def _set_require_grad(self,
        rg:bool, # Requires grad: if `True` sets gradient for parameters, else uses state `state["force_train"]`
        p:Tensor, # Parameters to set gradient
        pg, # Param groups (unused but needed because unpack *o)
        state: dict,
        h # Hyperparameter (unused but needed because unpack *o)
    ):
        # `force_train` in a param's state keeps it trainable even in a frozen group.
        p.requires_grad_(rg or state.get('force_train', False))

    def freeze_to(self,
        n:int # Freeze up to `n` layers
    ):
        # Negative `n` counts from the end, like normal Python indexing.
        self.frozen_idx = n if n >= 0 else len(self.param_lists) + n
        if self.frozen_idx >= len(self.param_lists):
            warn(f"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.")
        # Groups from `n` on are trainable; groups before `n` are frozen.
        for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o)
        for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o)

    def freeze(self):
        # Freezing everything but the last group requires at least two groups.
        assert(len(self.param_lists)>1)
        self.freeze_to(-1)

    def set_hypers(self, **kwargs): L(kwargs.items()).starmap(self.set_hyper)

    def _set_hyper(self,
        k, # Hyperparameter key
        v # Hyperparameter value
    ):
        # `v` holds one value per parameter group.
        for v_,h in zip(v, self.hypers): h[k] = v_

    def set_hyper(self,
        k, # Hyperparameter key or slice of keys
        v # Hyperparameter value or slice of values
    ):
        # A slice spreads values across groups: via `even_mults` when a start is
        # given (presumably a geometric spread -- see fastcore), otherwise
        # `stop/10` for every group but the last, which gets `stop`.
        if isinstance(v, slice):
            if v.start: v = even_mults(v.start, v.stop, len(self.param_lists))
            else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop]
        v = L(v, use_list=None)
        # A single value is broadcast to every parameter group.
        if len(v)==1: v = v*len(self.param_lists)
        assert len(v) == len(self.hypers), f"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups."
        self._set_hyper(k, v)

    def unfreeze(self): self.freeze_to(0)

    @property
    def param_groups(self): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)]

    @param_groups.setter
    def param_groups(self,
        v:dict # List of dicts to set `params` and other hyper parameters
    ):
        # NOTE(review): `pg = v_['params']` only rebinds the loop variable, so the
        # optimizer's `param_lists` are never actually replaced here -- only the
        # hypers below are updated. This matches upstream fastai; confirm before
        # changing.
        for pg,v_ in zip(self.param_lists,v): pg = v_['params']
        for hyper,v_ in zip(self.hypers,v):
            for k,t in v_.items():
                if k != 'params': hyper[k] = t
# %% ../nbs/12_optimizer.ipynb 8
def _update(
state:dict,
new=None # New values to update `state` dict
):
if new is None: return state
if isinstance(new, dict): state.update(new)
return state
# %% ../nbs/12_optimizer.ipynb 10
class Optimizer(_BaseOptimizer):
    "Base optimizer class for the fastai library, updating `params` with `cbs`"
    _keep_on_clear = ['force_train', 'do_wd'] # state keys that survive `clear_state`
    def __init__(self,
        params:Tensor|Iterable, # Model parameters
        cbs:callable|MutableSequence, # `Optimizer` step callbacks
        **defaults # Hyper parameters default values
    ):
        if 'train_bn' in defaults.keys():
            _ = defaults.pop('train_bn')
            warn('Setting `train_bn` in `Optimizer` has no effect. Set `train_bn` on `Learner` init instead')
        params = L(params)
        self.cbs,self.state = L(cbs),defaultdict(dict)
        # Callback-declared defaults are merged first, so explicit `defaults` win.
        defaults = merge(*self.cbs.attrgot('defaults'), defaults)
        # A list of lists means the caller pre-split parameter groups; otherwise one group.
        self.param_lists = L(L(p) for p in params) if isinstance(params[0], (L,list)) else L([params])
        self.hypers = L({} for _ in range_of(self.param_lists))
        self.set_hypers(**defaults)
        self.frozen_idx = 0

    def zero_grad(self):
        # Detach then zero each gradient in place (only params that have one).
        for p,*_ in self.all_params(with_grad=True):
            p.grad.detach_()
            p.grad.zero_()

    def step(self, closure=None):
        if closure is not None: raise NotImplementedError("fastai optimizers currently do not support closure")
        for p,pg,state,hyper in self.all_params(with_grad=True):
            # Each callback gets the current state+hypers as kwargs and may return
            # a dict of state updates, threaded through `_update` in order.
            for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper}))
            self.state[p] = state

    def clear_state(self):
        # Drop all per-param state except the keys listed in `_keep_on_clear`.
        for p,pg,state,hyper in self.all_params():
            self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state}

    def state_dict(self):
        # Per-param states in `all_params` order, plus the per-group hypers.
        state = [self.state[p] for p,*_ in self.all_params()]
        return {'state': state, 'hypers': self.hypers}

    def load_state_dict(self,
        sd:dict # State dict with `hypers` and `state` to load on the optimizer
    ):
        # Shapes must match the current param groups exactly.
        assert len(sd["hypers"]) == len(self.param_lists)
        assert len(sd["state"]) == sum([len(pg) for pg in self.param_lists])
        self.hypers = sd['hypers']
        self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])}
# %% ../nbs/12_optimizer.ipynb 21
def sgd_step(p, lr, **kwargs):
    "Vanilla SGD step: `p -= lr * p.grad`, in place."
    p.data.sub_(p.grad.data, alpha=lr)
# %% ../nbs/12_optimizer.ipynb 24
def weight_decay(p, lr, wd, do_wd=True, **kwargs):
    "Decoupled weight decay: shrink `p` in place by the factor `1 - lr*wd`."
    if do_wd and wd != 0:
        p.data.mul_(1 - lr*wd)

weight_decay.defaults = dict(wd=0.)
# %% ../nbs/12_optimizer.ipynb 26
def l2_reg(p, lr, wd, do_wd=True, **kwargs):
    "Classic L2 regularization: add `wd*p` to `p.grad` in place."
    if do_wd and wd != 0:
        p.grad.data.add_(p.data, alpha=wd)

l2_reg.defaults = dict(wd=0.)
# %% ../nbs/12_optimizer.ipynb 41
def average_grad(p, mom, dampening=False, grad_avg=None, **kwargs):
    "Track the exponential moving average of `p`'s gradients with momentum `mom`."
    grad = p.grad.data
    if grad_avg is None:
        grad_avg = torch.zeros_like(grad)
    # With dampening the new gradient is scaled by (1-mom); without, by 1.
    damp = (1 - mom) if dampening else 1.
    grad_avg.mul_(mom).add_(grad, alpha=damp)
    return {'grad_avg': grad_avg}

average_grad.defaults = dict(mom=0.9)
# %% ../nbs/12_optimizer.ipynb 44
def average_sqr_grad(p, sqr_mom, dampening=True, sqr_avg=None, **kwargs):
    "Track the exponential moving average of `p`'s squared gradients with `sqr_mom`."
    grad = p.grad.data
    if sqr_avg is None:
        sqr_avg = torch.zeros_like(grad)
    damp = (1 - sqr_mom) if dampening else 1.
    sqr_avg.mul_(sqr_mom).addcmul_(grad, grad, value=damp)
    return {'sqr_avg': sqr_avg}

average_sqr_grad.defaults = dict(sqr_mom=0.99)
# %% ../nbs/12_optimizer.ipynb 62
def momentum_step(p, lr, grad_avg, **kwargs):
    "SGD-with-momentum step: `p -= lr * grad_avg`, in place."
    p.data.sub_(grad_avg, alpha=lr)
# %% ../nbs/12_optimizer.ipynb 63
def SGD(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0., # Gradient moving average (β1) coefficient
    wd:Real=0., # Optional weight decay (true or L2)
    decouple_wd:bool=True # Apply true weight decay or L2 regularization (SGD)
) -> Optimizer:
    "A SGD `Optimizer`"
    # Decoupled (true) weight decay vs. classic L2 regularization.
    cbs = [weight_decay if decouple_wd else l2_reg]
    if mom != 0:
        cbs += [average_grad, momentum_step]
    else:
        cbs += [sgd_step]
    return Optimizer(params, cbs, lr=lr, mom=mom, wd=wd)
# %% ../nbs/12_optimizer.ipynb 70
def rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):
    "RMSProp step: divide the (averaged) gradient by `sqrt(sqr_avg) + eps`."
    denom = sqr_avg.sqrt().add_(eps)
    numer = p.grad if grad_avg is None else grad_avg
    p.data.addcdiv_(numer, denom, value=-lr)

rms_prop_step.defaults = dict(eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 71
def RMSProp(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0., # Gradient moving average (β1) coefficient
    sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
    eps:float=1e-8, # Added for numerical stability
    wd:Real=0., # Optional weight decay (true or L2)
    decouple_wd:bool=True # Apply true weight decay or L2 regularization (RMSProp)
) -> Optimizer:
    "A RMSProp `Optimizer`"
    # Decoupled weight decay or L2 folded into the gradients.
    cbs = [weight_decay if decouple_wd else l2_reg]
    # Track the grad average only when momentum is used; squared grads are always tracked.
    if mom != 0.: cbs.append(average_grad)
    cbs.append(average_sqr_grad)
    cbs.append(rms_prop_step)
    return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, wd=wd)
# %% ../nbs/12_optimizer.ipynb 76
def step_stat(p, step=0, **kwargs):
    "Register the number of steps done in `state` for `p`"
    # Simply increment the per-parameter step counter kept in the optimizer state.
    return {'step': step + 1}
# %% ../nbs/12_optimizer.ipynb 78
def debias(mom, damp, step):
    "Bias-correction factor for an EMA with coefficient `mom` after `step` updates"
    return damp * (1 - mom**step) / (1 - mom)
# %% ../nbs/12_optimizer.ipynb 79
def adam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
    "Step for Adam with `lr` on `p`"
    # Bias-correct both moving averages before taking the update.
    corr1 = debias(mom, 1-mom, step)
    corr2 = debias(sqr_mom, 1-sqr_mom, step)
    denom = (sqr_avg/corr2).sqrt() + eps
    p.data.addcdiv_(grad_avg, denom, value=-lr/corr1)
    return p
adam_step._defaults = dict(eps=1e-5)
# %% ../nbs/12_optimizer.ipynb 80
def Adam(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.9, # Gradient moving average (β1) coefficient
    sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
    eps:float=1e-5, # Added for numerical stability
    wd:Real=0.01, # Optional weight decay (true or L2)
    decouple_wd:bool=True # Apply true weight decay (AdamW) or L2 regularization (Adam)
) -> Optimizer:
    "A Adam/AdamW `Optimizer`"
    # Decoupled weight decay gives AdamW; plain L2 on the grads gives classic Adam.
    wd_cb = weight_decay if decouple_wd else l2_reg
    cbs = [wd_cb, partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_step]
    return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 85
def radam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, beta, **kwargs):
    "Step for RAdam with `lr` on `p`"
    # Bias-correction factors for the two moving averages.
    debias1 = debias(mom, 1-mom, step)
    debias2 = debias(sqr_mom, 1-sqr_mom, step)
    # `r_inf` is the asymptotic length of the approximated SMA; `r` is its value
    # at the current `step` and grows towards `r_inf` as training progresses.
    r_inf = 2/(1-sqr_mom) - 1
    r = r_inf - 2*step*sqr_mom**step/(1-sqr_mom**step)
    if r > 5:
        # Variance is considered tractable: apply the rectification term `v`
        # and take an Adam-like adaptive step.
        v = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))
        denom = (sqr_avg/debias2).sqrt()
        if eps: denom += eps
        # SAdam variant: smooth the denominator with softplus when `beta` is set.
        if beta: denom = F.softplus(denom, beta)
        p.data.addcdiv_(grad_avg, denom, value = -lr*v / debias1)
    # Otherwise fall back to a plain (non-adaptive) momentum step.
    else: p.data.add_(grad_avg, alpha=-lr / debias1)
    return p
radam_step._defaults = dict(eps=1e-5)
# %% ../nbs/12_optimizer.ipynb 86
def RAdam(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.9, # Gradient moving average (β1) coefficient
    sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
    eps:float=1e-5, # Added for numerical stability
    wd:Real=0., # Optional weight decay (true or L2)
    beta:float=0., # Set to enable SAdam
    decouple_wd:bool=True # Apply true weight decay (RAdamW) or L2 regularization (RAdam)
) -> Optimizer:
    "A RAdam/RAdamW `Optimizer`"
    # Decoupled weight decay gives RAdamW; L2 on the grads gives RAdam.
    wd_cb = weight_decay if decouple_wd else l2_reg
    cbs = [wd_cb, partial(average_grad, dampening=True), average_sqr_grad, step_stat, radam_step]
    return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd, beta=beta)
# %% ../nbs/12_optimizer.ipynb 92
def qhadam_step(p, lr, mom, sqr_mom, sqr_avg, nu_1, nu_2, step, grad_avg, eps, **kwargs):
    "Step for QHAdam with `lr` on `p`"
    # Bias-correction factors for the two moving averages.
    debias1 = debias(mom, 1-mom, step)
    debias2 = debias(sqr_mom, 1-sqr_mom, step)
    # Quasi-hyperbolic update: interpolate between the raw gradient and the
    # bias-corrected moving averages with weights `nu_1` (numerator) and `nu_2`
    # (denominator), then take a normalized step of size `lr`.
    p.data.addcdiv_(((1-nu_1) * p.grad.data) + (nu_1 * (grad_avg / debias1)),
                    (((1 - nu_2) * (p.grad.data)**2) + (nu_2 * (sqr_avg / debias2))).sqrt() + eps,
                    value = -lr)
    return p
qhadam_step._defaults = dict(eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 93
def QHAdam(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.999, # Gradient moving average (β1) coefficient
    sqr_mom:float=0.999, # Gradient squared moving average (β2) coefficient
    nu_1:float=0.7, # QH immediate discount factor
    nu_2:float=1.0, # QH momentum discount factor
    eps:float=1e-8, # Added for numerical stability
    wd:Real=0., # Optional weight decay (true or L2)
    decouple_wd:bool=True, # Apply true weight decay (QHAdamW) or L2 regularization (QHAdam)
) -> Optimizer:
    "A QHAdam/QHAdamW `Optimizer`"
    # Both moving averages are dampened for QHAdam.
    cbs = [weight_decay if decouple_wd else l2_reg,
           partial(average_grad, dampening=True),
           partial(average_sqr_grad, dampening=True),
           step_stat, qhadam_step]
    return Optimizer(params, cbs, lr=lr, nu_1=nu_1, nu_2=nu_2,
                     mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 96
def larc_layer_lr(p, lr, trust_coeff, wd, eps, clip=True, **kwargs):
    "Computes the local lr before weight decay is applied"
    p_norm = torch.norm(p.data)
    g_norm = torch.norm(p.grad.data)
    # Layer-wise trust ratio: ||p|| / (||g|| + wd*||p|| + eps), scaled by lr*trust_coeff.
    local_lr = lr * trust_coeff * p_norm / (g_norm + p_norm * wd + eps)
    # LARC clips at the global lr; LARS (clip=False) uses the raw local lr.
    if clip: local_lr = min(lr, local_lr)
    return {'local_lr': local_lr}
larc_layer_lr.defaults = dict(trust_coeff=0.02, wd=0., eps=1e-8)
# %% ../nbs/12_optimizer.ipynb 97
def larc_step(p, local_lr, grad_avg=None, **kwargs):
    "Step for LARC `local_lr` on `p`"
    # Use the momentum buffer when present, else the raw gradient.
    update = p.grad.data if grad_avg is None else grad_avg
    p.data.add_(update, alpha=-local_lr)
# %% ../nbs/12_optimizer.ipynb 98
def Larc(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.9, # Gradient moving average (β1) coefficient
    clip:bool=True, # LARC if clip=True, LARS if clip=False
    trust_coeff:float=0.02, # Trust coeffiecnet for calculating layerwise LR
    eps:float=1e-8, # Added for numerical stability
    wd:Real=0., # Optional weight decay (true or L2)
    decouple_wd:bool=True # Apply true weight decay or L2 regularization
) -> Optimizer:
    "A LARC/LARS `Optimizer`"
    cbs = [weight_decay if decouple_wd else l2_reg]
    # Momentum buffer only when requested; then the layer-wise lr and the step.
    if mom != 0.: cbs.append(average_grad)
    cbs.append(partial(larc_layer_lr, clip=clip))
    cbs.append(larc_step)
    return Optimizer(params, cbs, lr=lr, mom=mom, trust_coeff=trust_coeff, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 103
def lamb_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
    "Step for LAMB with `lr` on `p`"
    corr1 = debias(mom, 1-mom, step)
    corr2 = debias(sqr_mom, 1-sqr_mom, step)
    # Adam-style, bias-corrected update direction. (Named `update` rather than
    # shadowing the integer `step` argument.)
    r1 = p.data.pow(2).mean().sqrt()
    update = (grad_avg/corr1) / ((sqr_avg/corr2).sqrt()+eps)
    r2 = update.pow(2).mean().sqrt()
    # Trust ratio between parameter norm and update norm, capped at 10.
    q = 1 if r1 == 0 or r2 == 0 else min(r1/r2, 10)
    p.data.add_(update, alpha=-lr*q)
lamb_step._defaults = dict(eps=1e-6, wd=0.)
# %% ../nbs/12_optimizer.ipynb 104
def Lamb(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.9, # Gradient moving average (β1) coefficient
    sqr_mom:float=0.99, # Gradient squared moving average (β2) coefficient
    eps:float=1e-5, # Added for numerical stability
    wd:Real=0., # Optional weight decay (true or L2)
    decouple_wd:bool=True # Apply true weight decay or L2 regularization
) -> Optimizer:
    "A LAMB `Optimizer`"
    wd_cb = weight_decay if decouple_wd else l2_reg
    cbs = [wd_cb, partial(average_grad, dampening=True), average_sqr_grad, step_stat, lamb_step]
    return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# %% ../nbs/12_optimizer.ipynb 109
class Lookahead(Optimizer, GetAttr):
    "Wrap `opt` in a lookahead optimizer"
    # GetAttr: unknown attribute access is delegated to the wrapped `self.opt`.
    _default='opt'
    def __init__(self,
        opt:Optimizer, # `Optimizer` to wrap with Lookahead
        k:int=6, # How often to conduct Lookahead step
        alpha:float=0.5, # Slow weight moving average coefficient
    ):
        store_attr('opt,k,alpha')
        self._init_state()
    def step(self, closure=None):
        if closure is not None: raise NotImplementedError("fastai optimizers currently do not support closure")
        # Snapshot the "slow" weights lazily on the very first step.
        if self.slow_weights is None: self._copy_weights()
        self.opt.step()
        self.count += 1
        if self.count%self.k != 0: return
        # Every `k` steps: move each slow weight towards its fast counterpart by
        # `alpha`, then reset the fast weights to the updated slow weights.
        for slow_pg,fast_pg in zip(self.slow_weights,self.param_lists):
            for slow_p,fast_p in zip(slow_pg,fast_pg):
                slow_p.data.add_(fast_p.data-slow_p.data, alpha=self.alpha)
                fast_p.data.copy_(slow_p.data)
    def clear_state(self):
        # Clear both the wrapped optimizer's state and the lookahead bookkeeping.
        self.opt.clear_state()
        self._init_state()
    def state_dict(self):
        # Extend the wrapped optimizer's state with the lookahead-specific entries.
        state = self.opt.state_dict()
        state.update({'count': self.count, 'slow_weights': self.slow_weights})
        return state
    def load_state_dict(self, sd):
        # Pop our entries first so the remainder is a valid dict for `self.opt`.
        self.count = sd.pop('count')
        self.slow_weights = sd.pop('slow_weights')
        self.opt.load_state_dict(sd)
    def _init_state(self): self.count,self.slow_weights = 0,None
    def _copy_weights(self): self.slow_weights = L(L(p.clone().detach() for p in pg) for pg in self.param_lists)
    @property
    def param_lists(self): return self.opt.param_lists
    @param_lists.setter
    def param_lists(self, v): self.opt.param_lists = v
# %% ../nbs/12_optimizer.ipynb 111
@delegates(RAdam)
def ranger(
    params:Tensor|Iterable, # Model parameters
    lr:float|slice, # Default learning rate
    mom:float=0.95, # Gradient moving average (β1) coefficient
    wd:Real=0.01, # Optional weight decay (true or L2)
    eps:float=1e-6, # Added for numerical stability
    k:int=6, # How often to conduct Lookahead step
    alpha:float=0.5, # Slow weight moving average coefficient
    **kwargs
) -> Lookahead:
    "Convenience method for `Lookahead` with `RAdam`"
    # Build the inner RAdam optimizer, then wrap it in Lookahead.
    inner = RAdam(params, lr=lr, mom=mom, wd=wd, eps=eps, **kwargs)
    return Lookahead(inner, k=k, alpha=alpha)
# %% ../nbs/12_optimizer.ipynb 114
def detuplify_pg(d):
    "Flatten a param-group dict: list/tuple hyper-params become `key__idx` entries"
    res = {}
    for key, val in d.items():
        # The actual parameter tensors are never hyper-parameters.
        if key == 'params': continue
        if is_listy(val):
            for i, item in enumerate(val): res[f'{key}__{i}'] = item
        else:
            res[key] = val
    return res
# %% ../nbs/12_optimizer.ipynb 116
def set_item_pg(pg, k, v):
    "Set hyper-param `k` (possibly a flattened `name__idx` key) to `v` in param group `pg`"
    if '__' not in k:
        pg[k] = v
    else:
        # Re-assemble the tuple-valued hyper-param, replacing only position `idx`.
        name, idx = k.split('__')
        idx = int(idx)
        pg[name] = tuple(v if i == idx else old for i, old in enumerate(pg[name]))
    return pg
# %% ../nbs/12_optimizer.ipynb 118
# Maps PyTorch optimizer hyper-parameter names (after `detuplify_pg` flattening,
# e.g. `betas__0`) to the equivalent fastai `Optimizer` names.
pytorch_hp_map = {'momentum': 'mom', 'weight_decay': 'wd', 'alpha': 'sqr_mom', 'betas__0': 'mom',
                  'betas__1': 'sqr_mom'}
# %% ../nbs/12_optimizer.ipynb 119
def _convert_params(o:list) -> list:
splitter = []
for group in o:
if isinstance(group, dict): splitter.append(group)
else: splitter.append({'params':group})
return splitter
# %% ../nbs/12_optimizer.ipynb 120
class OptimWrapper(_BaseOptimizer, GetAttr):
    "A wrapper class for existing PyTorch optimizers"
    # These methods are exposed directly; everything else unknown is delegated to `self.opt`.
    _xtra=['zero_grad', 'step', 'state_dict', 'load_state_dict']
    _default='opt'
    def __init__(self,
        params:Tensor|Iterable=None, # Model parameters. Don't set if using a built optimizer
        opt:callable|torch.optim.Optimizer=None, # A torch optimizer constructor, or an already built optimizer
        hp_map:dict=None, # A dictionary converting PyTorch optimizer keys to fastai's `Optimizer` keys. Defaults to `pytorch_hp_map`
        convert_groups:bool=True, # Convert parameter groups from splitter or pass unaltered to `opt`
        **kwargs
    ):
        if params is None and opt is None: raise ValueError("Both `params` and `opt` cannot be None.")
        if callable(opt):
            # `opt` is a constructor: optionally convert fastai splitter output
            # (a list of param lists) into PyTorch-style param-group dicts.
            if convert_groups:
                params = L(params)
                convert_groups = isinstance(params[0], (L,list))
            self.opt = opt(_convert_params(params), **kwargs) if convert_groups else opt(params, **kwargs)
        else:
            # `opt` is an already-built optimizer: `params` must not also be given.
            if params is not None: raise ValueError("Tried using both `params` and a built optimizer. Just pass in `opt`.")
            self.opt = opt
        if hp_map is None: hp_map = pytorch_hp_map
        # Bidirectional maps between PyTorch hyper-param names (flattened via
        # `detuplify_pg`) and the fastai names used by schedulers/callbacks.
        self.fwd_map = {k: hp_map[k] if k in hp_map else k for k in detuplify_pg(self.opt.param_groups[0]).keys()}
        self.bwd_map = {v:k for k,v in self.fwd_map.items()}
        self.state = defaultdict(dict, {})
        self.frozen_idx = 0
    @property
    def hypers(self):
        # Per-group hyper-params, translated to fastai names.
        return [{self.fwd_map.get(k, k):v for k,v in detuplify_pg(pg).items() if k != 'params'} for pg in self.opt.param_groups]
    def _set_hyper(self, k, v):
        # Translate the fastai name back and write one value per param group.
        for pg,v_ in zip(self.opt.param_groups,v): pg = set_item_pg(pg, self.bwd_map[k], v_)
    def clear_state(self): self.opt.state = defaultdict(dict, {})
    @property
    def param_lists(self): return [pg['params'] for pg in self.opt.param_groups]
    @param_lists.setter
    def param_lists(self, v):
        for pg,v_ in zip(self.opt.param_groups,v): pg['params'] = v_
| 21,187 | 41.717742 | 139 | py |
fastai | fastai-master/fastai/test_utils.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/97_test_utils.ipynb.
# %% ../nbs/97_test_utils.ipynb 0
from __future__ import annotations
from .imports import *
from .data.all import *
from .optimizer import *
from .learner import *
from .callback.core import *
from torch.utils.data import TensorDataset
# %% auto 0
__all__ = ['synth_dbunch', 'RegModel', 'synth_learner', 'VerboseCallback', 'get_env', 'try_import', 'nvidia_smi', 'nvidia_mem',
'show_install']
# %% ../nbs/97_test_utils.ipynb 3
from torch.utils.data import TensorDataset
# %% ../nbs/97_test_utils.ipynb 4
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2, cuda=False):
    "Create a `DataLoaders` of `n_train`/`n_valid` batches from the noisy line `y = a*x + b`"
    def make_ds(n_batches):
        x = torch.randn(bs*n_batches, 1)
        y = a*x + b + 0.1*torch.randn(bs*n_batches, 1)
        return TensorDataset(x, y)
    train_ds = make_ds(n_train)
    valid_ds = make_ds(n_valid)
    device = default_device() if cuda else None
    train_dl = TfmdDL(train_ds, bs=bs, shuffle=True, num_workers=0)
    valid_dl = TfmdDL(valid_ds, bs=bs, num_workers=0)
    return DataLoaders(train_dl, valid_dl, device=device)
# %% ../nbs/97_test_utils.ipynb 5
class RegModel(Module):
    "Linear regression model `y = a*x + b` with scalar learnable `a` and `b`"
    def __init__(self):
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x): return x*self.a + self.b
# %% ../nbs/97_test_utils.ipynb 6
@delegates(Learner.__init__)
def synth_learner(n_trn=10, n_val=2, cuda=False, lr=1e-3, data=None, model=None, **kwargs):
    "Create a `Learner` on synthetic linear data with a `RegModel` (unless overridden)"
    if data is None: data = synth_dbunch(n_train=n_trn, n_valid=n_val, cuda=cuda)
    if model is None: model = RegModel()
    opt = partial(SGD, mom=0.9)
    return Learner(data, model, lr=lr, loss_func=MSELossFlat(), opt_func=opt, **kwargs)
# %% ../nbs/97_test_utils.ipynb 7
class VerboseCallback(Callback):
    "Callback that prints the name of each event called"
    def __call__(self, event_name):
        # Print the event name, then defer to the normal callback dispatch.
        print(event_name)
        super().__call__(event_name)
# %% ../nbs/97_test_utils.ipynb 9
def get_env(name):
    "Return env var value if it's defined and not an empty string, or return Unknown"
    value = os.environ.get(name, '')
    return value if value else "Unknown"
# %% ../nbs/97_test_utils.ipynb 10
def try_import(module):
    "Try to import `module`. Returns module's object on success, None on failure"
    try: return importlib.import_module(module)
    # Catch `Exception` rather than a bare `except:` so KeyboardInterrupt and
    # SystemExit still propagate; any import-time failure yields None.
    except Exception: return None
# %% ../nbs/97_test_utils.ipynb 11
def nvidia_smi(cmd = "nvidia-smi"):
    "Run `cmd` and return its output, or None if it cannot be launched"
    # The unused `as e` binding was dropped; OSError covers a missing/non-executable binary.
    try: res = run(cmd)
    except OSError: return None
    return res
# %% ../nbs/97_test_utils.ipynb 13
def nvidia_mem():
    "Return a list of per-GPU total-memory strings (MB) from nvidia-smi, or None on failure"
    try: mem = run("nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader")
    # `except Exception` (not a bare `except:`) so KeyboardInterrupt/SystemExit propagate;
    # any failure of the query is treated as "no GPUs".
    except Exception: return None
    return mem.strip().split('\n')
# %% ../nbs/97_test_utils.ipynb 15
def show_install(show_nvidia_smi:bool=False):
    "Print user's setup information"
    import fastai, platform, fastprogress, fastcore
    # `rep` collects [label, value] rows (value None = section header);
    # `opt_mods` collects optional packages the user could install.
    rep = []
    opt_mods = []
    rep.append(["=== Software ===", None])
    rep.append(["python", platform.python_version()])
    rep.append(["fastai", fastai.__version__])
    rep.append(["fastcore", fastcore.__version__])
    rep.append(["fastprogress", fastprogress.__version__])
    rep.append(["torch", torch.__version__])
    # nvidia-smi
    smi = nvidia_smi()
    if smi:
        match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
        if match: rep.append(["nvidia driver", match[0]])
    available = "available" if torch.cuda.is_available() else "**Not available** "
    rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
    # no point reporting on cudnn if cuda is not available, as it
    # seems to be enabled at times even on cpu-only setups
    if torch.cuda.is_available():
        enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
        rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
    rep.append(["\n=== Hardware ===", None])
    # NOTE(review): `gpu_total_mem` is never populated below, so the per-gpu
    # memory prefix is always omitted — confirm whether that is intended.
    gpu_total_mem = []
    nvidia_gpu_cnt = 0
    if smi:
        mem = nvidia_mem()
        nvidia_gpu_cnt = len(ifnone(mem, []))
    if nvidia_gpu_cnt: rep.append(["nvidia gpus", nvidia_gpu_cnt])
    torch_gpu_cnt = torch.cuda.device_count()
    if torch_gpu_cnt:
        rep.append(["torch devices", torch_gpu_cnt])
        # information for each gpu
        for i in range(torch_gpu_cnt):
            rep.append([f"  - gpu{i}", (f"{gpu_total_mem[i]}MB | " if gpu_total_mem else "") + torch.cuda.get_device_name(i)])
    else:
        if nvidia_gpu_cnt:
            rep.append([f"Have {nvidia_gpu_cnt} GPU(s), but torch can't use them (check nvidia driver)", None])
        else:
            rep.append([f"No GPUs available", None])
    rep.append(["\n=== Environment ===", None])
    rep.append(["platform", platform.platform()])
    if platform.system() == 'Linux':
        distro = try_import('distro')
        if distro:
            # full distro info
            rep.append(["distro", ' '.join(distro.linux_distribution())])
        else:
            opt_mods.append('distro');
            # partial distro info
            rep.append(["distro", platform.uname().version])
    rep.append(["conda env", get_env('CONDA_DEFAULT_ENV')])
    rep.append(["python", sys.executable])
    rep.append(["sys.path", "\n".join(sys.path)])
    print("\n\n```text")
    # Align values on the longest label among non-header rows.
    keylen = max([len(e[0]) for e in rep if e[1] is not None])
    for e in rep:
        print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
    if smi:
        if show_nvidia_smi: print(f"\n{smi}")
    else:
        if torch_gpu_cnt: print("no nvidia-smi is found")
        else: print("no supported gpus found on this system")
    print("```\n")
    print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
    if opt_mods:
        print("Optional package(s) to enhance the diagnostics can be installed with:")
        print(f"pip install {' '.join(opt_mods)}")
        print("Once installed, re-run this utility to get the additional information")
| 6,033 | 35.131737 | 151 | py |
fastai | fastai-master/fastai/callback/hook.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/15_callback.hook.ipynb.
# %% ../../nbs/15_callback.hook.ipynb 1
from __future__ import annotations
from ..basics import *
# %% auto 0
__all__ = ['Hook', 'hook_output', 'Hooks', 'hook_outputs', 'dummy_eval', 'model_sizes', 'num_features_model', 'has_params',
'HookCallback', 'total_params', 'layer_info', 'module_summary', 'ActivationStats']
# %% ../../nbs/15_callback.hook.ipynb 13
@docs
class Hook():
    "Create a hook on `m` with `hook_func`."
    def __init__(self, m, hook_func, is_forward=True, detach=True, cpu=False, gather=False):
        store_attr('hook_func,detach,cpu,gather')
        # Forward hooks fire after `m.forward`; backward hooks after grads are computed.
        # NOTE(review): `register_backward_hook` is deprecated in recent PyTorch in
        # favor of `register_full_backward_hook` — confirm against supported versions.
        f = m.register_forward_hook if is_forward else m.register_backward_hook
        self.hook = f(self.hook_fn)
        self.stored,self.removed = None,False
    def hook_fn(self, module, input, output):
        "Applies `hook_func` to `module`, `input`, `output`."
        # Detach from the autograd graph (optionally move to CPU / gather across
        # processes) so stored activations don't keep the graph alive.
        if self.detach:
            input,output = to_detach(input, cpu=self.cpu, gather=self.gather),to_detach(output, cpu=self.cpu, gather=self.gather)
        self.stored = self.hook_func(module, input, output)
    def remove(self):
        "Remove the hook from the model."
        # Guard so that removing twice (e.g. context-manager exit after explicit
        # `remove`) is a no-op.
        if not self.removed:
            self.hook.remove()
            self.removed=True
    def __enter__(self, *args): return self
    def __exit__(self, *args): self.remove()
    _docs = dict(__enter__="Register the hook",
                 __exit__="Remove the hook")
# %% ../../nbs/15_callback.hook.ipynb 25
def _hook_inner(m, i, o):
    # Keep tensors and list-likes as-is; materialize any other iterable as a list.
    if isinstance(o, Tensor) or is_listy(o): return o
    return list(o)

def hook_output(module, detach=True, cpu=False, grad=False):
    "Return a `Hook` that stores activations of `module` in `self.stored`"
    # A backward hook is used when gradients (rather than activations) are wanted.
    return Hook(module, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
# %% ../../nbs/15_callback.hook.ipynb 30
@docs
class Hooks():
    "Create several hooks on the modules in `ms` with `hook_func`."
    def __init__(self, ms, hook_func, is_forward=True, detach=True, cpu=False):
        # One `Hook` per module, all sharing the same hook function/options.
        self.hooks = [Hook(m, hook_func, is_forward, detach, cpu) for m in ms]
    def __getitem__(self,i): return self.hooks[i]
    def __len__(self):       return len(self.hooks)
    def __iter__(self):      return iter(self.hooks)
    @property
    def stored(self):        return L(o.stored for o in self)
    def remove(self):
        "Remove the hooks from the model."
        for h in self.hooks: h.remove()
    def __enter__(self, *args): return self
    def __exit__ (self, *args): self.remove()
    _docs = dict(stored = "The states saved in each hook.",
                 __enter__="Register the hooks",
                 __exit__="Remove the hooks")
# %% ../../nbs/15_callback.hook.ipynb 39
def hook_outputs(modules, detach=True, cpu=False, grad=False):
    "Return `Hooks` that store activations of all `modules` in `self.stored`"
    # Backward hooks when gradients are requested, forward hooks otherwise.
    forward = not grad
    return Hooks(modules, _hook_inner, detach=detach, cpu=cpu, is_forward=forward)
# %% ../../nbs/15_callback.hook.ipynb 43
def dummy_eval(m, size=(64,64)):
    "Evaluate `m` on a dummy input of a certain `size`"
    n_ch = in_channels(m)
    # Build the dummy batch on the same device/dtype as the model's parameters.
    dummy = one_param(m).new(1, n_ch, *size).requires_grad_(False).uniform_(-1., 1.)
    with torch.no_grad():
        return m.eval()(dummy)
# %% ../../nbs/15_callback.hook.ipynb 44
def model_sizes(m, size=(64,64)):
    "Pass a dummy input through the model `m` to get the various sizes of activations."
    # Hook every layer, run one dummy forward pass, and read off the stored shapes.
    with hook_outputs(m) as hooks:
        dummy_eval(m, size=size)
        return [h.stored.shape for h in hooks]
# %% ../../nbs/15_callback.hook.ipynb 46
def num_features_model(m):
    "Return the number of output features for `m`."
    sz, ch_in = 32, in_channels(m)
    while True:
        # Some models require a larger input: keep doubling the spatial size until
        # a forward pass succeeds, giving up beyond 2048.
        try:
            return model_sizes(m, (sz, sz))[-1][1]
        except Exception as err:
            sz *= 2
            if sz > 2048: raise err
# %% ../../nbs/15_callback.hook.ipynb 50
def has_params(m):
    "Check if `m` has at least one parameter"
    # `any` over the generator avoids materializing the full parameter list.
    return any(True for _ in m.parameters())
# %% ../../nbs/15_callback.hook.ipynb 52
@funcs_kwargs
class HookCallback(Callback):
    "`Callback` that can be used to register hooks on `modules`"
    # `@funcs_kwargs` lets callers pass `hook=...` as a kwarg to override the no-op.
    _methods = ["hook"]
    hook = noops
    def __init__(self, modules=None, every=None, remove_end=True, is_forward=True, detach=True, cpu=True, include_paramless=False , **kwargs):
        store_attr('modules,every,remove_end,is_forward,detach,cpu, include_paramless')
        assert not kwargs
    def before_fit(self):
        "Register the `Hooks` on `self.modules`."
        # Default to every leaf module (optionally restricted to those with params).
        if self.modules is None: self.modules = [m for m in flatten_model(self.model) if self.include_paramless or has_params(m)]
        # `every=None` means hooks stay registered for the whole fit; otherwise
        # they are attached/detached around every `every`-th training batch.
        if self.every is None: self._register()
    def before_batch(self):
        if self.every is None: return
        if self.training and self.train_iter%self.every==0: self._register()
    def after_batch(self):
        if self.every is None: return
        if self.training and self.train_iter%self.every==0: self._remove()
    def after_fit(self):
        "Remove the `Hooks`."
        if self.remove_end: self._remove()
    def _register(self): self.hooks = Hooks(self.modules, self.hook, self.is_forward, self.detach, self.cpu)
    def _remove(self):
        if getattr(self, 'hooks', None): self.hooks.remove()
    def __del__(self): self._remove()
# %% ../../nbs/15_callback.hook.ipynb 59
def total_params(m):
    "Give the number of parameters of a module and if it's trainable or not"
    n = sum(p.numel() for p in m.parameters())
    flags = [p.requires_grad for p in m.parameters()]
    # A module with no params is reported non-trainable; otherwise the first
    # parameter's requires_grad flag is used.
    return n, (flags[0] if flags else False)
# %% ../../nbs/15_callback.hook.ipynb 61
def layer_info(learn, *xb):
    "Return layer infos of `model` on `xb` (only support batch first inputs)"
    def _track(m, i, o):
        # For each module record: class name, param count, trainable flag, output
        # shape(s), and whether any output shape equals an input shape (used by
        # `module_summary` to decide where to draw separators).
        params, trainable, shape = '', '', ''
        same = any((isinstance(x[0], torch.Tensor) and x[0].shape[1:] == x[1].shape for x in zip(i, o)))
        shape = apply(lambda x: x.shape, o)
        if hasattr(m, 'weight'): # non activation layer
            params, trainable = total_params(m)
        return (type(m).__name__, params, trainable, shape, same)
    with Hooks(flatten_model(learn.model), _track) as h:
        # Run a single-item batch through the model, with train-only callbacks
        # removed and logging disabled, so the hooks capture one clean forward pass.
        batch = apply(lambda o:o[:1], xb)
        train_only_cbs = [cb for cb in learn.cbs if hasattr(cb, '_only_train_loop')]
        with learn.removed_cbs(train_only_cbs), learn.no_logging(), learn as l:
            r = l.get_preds(dl=[batch], inner=True, reorder=False)
        return h.stored
# %% ../../nbs/15_callback.hook.ipynb 66
def _get_shapes(o, bs):
inp = o[first(o)] if (isinstance(o, dict)) else o
return ' x '.join([str(bs)] + [str(t) for t in inp[1:]])
def _print_shapes(o, bs):
if isinstance(o, torch.Size): return _get_shapes(o, bs)
elif isinstance(o, tuple): return _get_shapes(o[0], bs)
else: return str([_print_shapes(x, bs) for x in o])
# %% ../../nbs/15_callback.hook.ipynb 67
def module_summary(learn, *xb):
    "Print a summary of `model` using `xb`"
    #Individual parameters wrapped in ParameterModule aren't called through the hooks in `layer_info`,
    # thus are not counted inside the summary
    #TODO: find a way to have them counted in param number somehow
    infos = layer_info(learn, *xb)
    n,bs = 76,find_bs(xb)
    inp_sz = _print_shapes(apply(lambda x:x.shape, xb), bs)
    res = f"{type(learn.model).__name__} (Input shape: {inp_sz})\n"
    res += "=" * n + "\n"
    res += f"{'Layer (type)':<20} {'Output Shape':<20} {'Param #':<10} {'Trainable':<10}\n"
    res += "=" * n
    # ps: total params; trn_ps: trainable params; j tracks whether the header
    # row for the current shape group has already been printed.
    ps,trn_ps,j = 0,0,0
    infos = [o for o in infos if o is not None] #see comment in previous cell
    prev_sz = None
    for typ,np,trn,sz,chnged in infos:
        if sz is None: continue
        if j == 0:
            res += f'\n{"":<20} {_print_shapes(sz, bs)[:19]:<20}' # to avoid a double line at the top
        # Start a new separator section whenever the output shape changes.
        if not chnged and not prev_sz == sz and j > 0: res += "\n" + "_" * n + "\n" + f'{"":<20} {_print_shapes(sz, bs)[:19]:<20}'
        j = 1
        res += f"\n{typ:<20} {'':<20} {np:<10} {str(trn):<10}"
        if np != '':
            ps += np
            if trn: trn_ps += np
        prev_sz = sz
    res += "\n" + "_" * n + "\n"
    res += f"\nTotal params: {ps:,}\n"
    res += f"Total trainable params: {trn_ps:,}\n"
    res += f"Total non-trainable params: {ps - trn_ps:,}\n\n"
    return PrettyString(res)
# %% ../../nbs/15_callback.hook.ipynb 68
@patch
def summary(self:Learner):
    "Print a summary of the model, optimizer and loss function."
    # Use one training batch (inputs only) to drive the per-layer summary.
    xb = self.dls.train.one_batch()[:getattr(self.dls.train, "n_inp", 1)]
    res = module_summary(self, *xb)
    res += f"Optimizer used: {self.opt_func}\nLoss function: {self.loss_func}\n\n"
    # Freeze state is only known once the optimizer has been created.
    if self.opt is not None:
        res += f"Model " + ("unfrozen\n\n" if self.opt.frozen_idx==0 else f"frozen up to parameter group #{self.opt.frozen_idx}\n\n")
    res += "Callbacks:\n" + '\n'.join(f"  - {cb}" for cb in self.cbs.sorted('order'))
    return PrettyString(res)
# %% ../../nbs/15_callback.hook.ipynb 74
@delegates()
class ActivationStats(HookCallback):
    "Callback that record the mean and std of activations."
    order=-20
    def __init__(self, with_hist=False, **kwargs):
        super().__init__(**kwargs)
        # When True, also record a 40-bin histogram of each layer's activations.
        self.with_hist = with_hist
    def before_fit(self):
        "Initialize stats."
        super().before_fit()
        self.stats = L()
    def hook(self, m, i, o):
        # Per-layer record: mean, std, and the fraction of activations near zero.
        if isinstance(o, tuple): return self.hook_multi_ouput(o)
        o = o.float()
        res = {'mean': o.mean().item(), 'std': o.std().item(),
               'near_zero': (o<=0.05).long().sum().item()/o.numel()}
        if self.with_hist: res['hist'] = o.histc(40,0,10)
        return res
    def hook_multi_ouput(self,o_tuple):
        "For outputs of RNN which are [nested] tuples of tensors"
        res = []
        for o in self._flatten_tuple(o_tuple):
            if not(isinstance(o, Tensor)): continue
            res.append(self.hook(None, None, o))
        return res
    def _flatten_tuple(self, o_tuple):
        "Recursively flatten a [nested] tuple"
        res = []
        for it in o_tuple:
            if isinstance(it, tuple): res += self._flatten_tuple(it)
            else: res += [it]
        return tuple(res)
    def after_batch(self):
        "Take the stored results and puts it in `self.stats`"
        # Only record on training batches (respecting `every` when it is set).
        if self.training and (self.every is None or self.train_iter%self.every == 0): self.stats.append(self.hooks.stored)
        super().after_batch()
    def layer_stats(self, idx):
        # Time series of (mean, std, near_zero) for layer `idx`.
        lstats = self.stats.itemgot(idx)
        return L(lstats.itemgot(o) for o in ('mean','std','near_zero'))
    def hist(self, idx):
        # Stacked per-batch histograms for layer `idx`, log-scaled for display.
        res = self.stats.itemgot(idx).itemgot('hist')
        return torch.stack(tuple(res)).t().float().log1p()
    def color_dim(self, idx, figsize=(10,5), ax=None):
        "The 'colorful dimension' plot"
        res = self.hist(idx)
        if ax is None: ax = subplots(figsize=figsize)[1][0]
        ax.imshow(res, origin='lower')
        ax.axis('off')
    def plot_layer_stats(self, idx):
        # Three side-by-side plots: mean, std and % of near-zero activations.
        _,axs = subplots(1, 3, figsize=(12,3))
        for o,ax,title in zip(self.layer_stats(idx),axs,('mean','std','% near zero')):
            ax.plot(o)
            ax.set_title(title)
| 11,381 | 39.361702 | 142 | py |
fastai | fastai-master/fastai/callback/tracker.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/17_callback.tracker.ipynb.
# %% ../../nbs/17_callback.tracker.ipynb 2
from __future__ import annotations
from ..basics import *
from .progress import *
from .fp16 import MixedPrecision
# %% auto 0
__all__ = ['TerminateOnNaNCallback', 'TrackerCallback', 'EarlyStoppingCallback', 'SaveModelCallback', 'ReduceLROnPlateau']
# %% ../../nbs/17_callback.tracker.ipynb 6
class TerminateOnNaNCallback(Callback):
    "A `Callback` that terminates training if loss is NaN."
    order=-9
    def after_batch(self):
        "Test if `last_loss` is NaN and interrupts training."
        # Any non-finite loss (NaN or +/-inf) aborts the whole fit.
        non_finite = torch.isnan(self.loss) or torch.isinf(self.loss)
        if non_finite: raise CancelFitException
# %% ../../nbs/17_callback.tracker.ipynb 10
class TrackerCallback(Callback):
    "A `Callback` that keeps track of the best value in `monitor`."
    order,remove_on_fetch,_only_train_loop = 60,True,True
    def __init__(self,
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        # Infer the comparison direction from the monitor name when not given.
        if comp is None: comp = np.less if 'loss' in monitor or 'error' in monitor else np.greater
        # For "lower is better", min_delta must push the candidate value down.
        if comp == np.less: min_delta *= -1
        self.monitor,self.comp,self.min_delta,self.reset_on_fit,self.best= monitor,comp,min_delta,reset_on_fit,None
    def before_fit(self):
        "Prepare the monitored value"
        # Skip tracking during lr_find / get_preds runs.
        self.run = not hasattr(self, "lr_finder") and not hasattr(self, "gather_preds")
        if self.reset_on_fit or self.best is None: self.best = float('inf') if self.comp == np.less else -float('inf')
        assert self.monitor in self.recorder.metric_names[1:]
        self.idx = list(self.recorder.metric_names[1:]).index(self.monitor)
    def after_epoch(self):
        "Compare the last value to the best up to now"
        val = self.recorder.values[-1][self.idx]
        # `new_best` is what subclasses (early stopping, model saving, ...) check.
        if self.comp(val - self.min_delta, self.best): self.best,self.new_best = val,True
        else: self.new_best = False
    def after_fit(self): self.run=True
# %% ../../nbs/17_callback.tracker.ipynb 19
class EarlyStoppingCallback(TrackerCallback):
    "A `TrackerCallback` that terminates training when monitored quantity stops improving."
    order=TrackerCallback.order+3
    def __init__(self,
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        patience=1, # number of epochs to wait when training has not improved model.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        self.patience = patience
    def before_fit(self): self.wait = 0; super().before_fit()
    def after_epoch(self):
        "Compare the value monitored to its best score and maybe stop training."
        # Parent sets `self.new_best`; count consecutive epochs without improvement.
        super().after_epoch()
        if self.new_best: self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                print(f'No improvement since epoch {self.epoch-self.wait}: early stopping')
                raise CancelFitException()
# %% ../../nbs/17_callback.tracker.ipynb 26
class SaveModelCallback(TrackerCallback):
    "A `TrackerCallback` that saves the model's best during training and loads it at the end."
    order = TrackerCallback.order+1
    def __init__(self,
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        fname='model', # model name to be used when saving model.
        every_epoch=False, # if true, save model after every epoch; else save only when model is better than existing best.
        at_end=False, # if true, save model when training ends; else load best model if there is only one saved model.
        with_opt=False, # if true, save optimizer state (if any available) when saving model.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        assert not (every_epoch and at_end), "every_epoch and at_end cannot both be set to True"
        # keep track of file path for loggers
        self.last_saved_path = None
        store_attr('fname,every_epoch,at_end,with_opt')
    def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)
    def after_epoch(self):
        "Compare the value monitored to its best score and save if best."
        # `every_epoch` may also be an int n: save every n epochs (the modulo below).
        if self.every_epoch:
            if (self.epoch%self.every_epoch) == 0: self._save(f'{self.fname}_{self.epoch}')
        else: #every improvement
            super().after_epoch()
            if self.new_best:
                print(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.')
                self._save(f'{self.fname}')
    def after_fit(self, **kwargs):
        "Load the best model."
        # When saving per-epoch there is no single "best" checkpoint to restore.
        if self.at_end: self._save(f'{self.fname}')
        elif not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt)
# %% ../../nbs/17_callback.tracker.ipynb 30
class ReduceLROnPlateau(TrackerCallback):
    "A `TrackerCallback` that reduces learning rate when a metric has stopped improving."
    order=TrackerCallback.order+2
    def __init__(self,
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        patience=1, # number of epochs to wait when training has not improved model.
        factor=10., # the denominator to divide the learning rate by, when reducing the learning rate.
        min_lr=0, # the minimum learning rate allowed; learning rate cannot be reduced below this minimum.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        self.patience = patience
        self.factor = factor
        self.min_lr = min_lr
    def before_fit(self):
        "Reset the epochs-without-improvement counter."
        self.wait = 0
        super().before_fit()
    def after_epoch(self):
        "Compare the value monitored to its best score and reduce LR by `factor` if no improvement."
        super().after_epoch()
        if self.new_best:
            self.wait = 0
            return
        self.wait += 1
        if self.wait < self.patience: return
        # patience exhausted: divide every param group's lr, clamped at min_lr
        old_lr = self.opt.hypers[-1]['lr']
        for h in self.opt.hypers: h['lr'] = max(h['lr'] / self.factor, self.min_lr)
        self.wait = 0
        if self.opt.hypers[-1]["lr"] < old_lr:
            print(f'Epoch {self.epoch}: reducing lr to {self.opt.hypers[-1]["lr"]}')
| 7,720 | 54.15 | 148 | py |
fastai | fastai-master/fastai/callback/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/13_callback.core.ipynb.
# %% ../../nbs/13_callback.core.ipynb 2
from __future__ import annotations
from ..data.all import *
from ..optimizer import *
from ..losses import BaseLoss
# %% auto 0
__all__ = ['Callback', 'TrainEvalCallback', 'GatherPredsCallback', 'FetchPredsCallback', 'CancelStepException',
'CancelBackwardException', 'CancelFitException', 'CancelEpochException', 'CancelTrainException',
'CancelValidException', 'CancelBatchException', 'event']
# %% ../../nbs/13_callback.core.ipynb 4
_all_ = ['CancelStepException','CancelBackwardException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# %% ../../nbs/13_callback.core.ipynb 8
# Canonical, ordered list of training-loop event names (as a fastcore `L` of strings)
_events = L.split('after_create before_fit before_epoch before_train before_batch after_pred after_loss \
before_backward after_cancel_backward after_backward before_step after_cancel_step after_step \
after_cancel_batch after_batch after_cancel_train after_train before_validate after_cancel_validate \
after_validate after_cancel_epoch after_epoch after_cancel_fit after_fit')
# Expose each event name as an attribute of a generated `event` class
mk_class('event', **_events.map_dict(),
         doc="All possible events as attributes to get tab-completion and typo-proofing")
# %% ../../nbs/13_callback.core.ipynb 9
_all_ = ['event']
# %% ../../nbs/13_callback.core.ipynb 14
# Events fired inside the per-batch loop; `Callback.__call__` only applies the
# `run_train`/`run_valid` filtering to these events
_inner_loop = "before_batch after_pred after_loss before_backward after_cancel_backward after_backward before_step after_step after_cancel_batch after_batch".split()
# %% ../../nbs/13_callback.core.ipynb 15
# Docstrings for the control-flow exceptions; the loop below generates each
# exception class (subclassing `Exception`) from this mapping via `mk_class`
_ex_docs = dict(
    CancelBatchException="Skip the rest of this batch and go to `after_batch`",
    CancelTrainException="Skip the rest of the training part of the epoch and go to `after_train`",
    CancelValidException="Skip the rest of the validation part of the epoch and go to `after_validate`",
    CancelEpochException="Skip the rest of this epoch and go to `after_epoch`",
    CancelStepException ="Skip stepping the optimizer",
    CancelBackwardException="Skip the backward pass and go to `after_backward`",
    CancelFitException ="Interrupts training and go to `after_fit`")
for c,d in _ex_docs.items(): mk_class(c,sup=Exception,doc=d)
# %% ../../nbs/13_callback.core.ipynb 16
@funcs_kwargs(as_method=True)
class Callback(Stateful,GetAttr):
    "Basic class handling tweaks of the training loop by changing a `Learner` in various events"
    # order: sort key among callbacks; _default='learn' makes unknown attribute
    # reads fall through to `self.learn` (GetAttr); run/run_train/run_valid gate execution
    order,_default,learn,run,run_train,run_valid = 0,'learn',None,True,True,True
    _methods = _events
    def __init__(self, **kwargs): assert not kwargs, f'Passed unknown events: {kwargs}'
    def __repr__(self): return type(self).__name__
    def __call__(self, event_name):
        "Call `self.{event_name}` if it's defined"
        # Inner-loop events honor run_train/run_valid; all other events always run
        _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
               (self.run_valid and not getattr(self, 'training', False)))
        res = None
        if self.run and _run:
            # Control-flow exceptions propagate untouched; any other exception is
            # re-raised with the callback name and event prepended for debugging
            try: res = getcallable(self, event_name)()
            except (CancelBatchException, CancelBackwardException, CancelEpochException, CancelFitException, CancelStepException, CancelTrainException, CancelValidException): raise
            except Exception as e: raise modify_exception(e, f'Exception occured in `{self.__class__.__name__}` when calling event `{event_name}`:\n\t{e.args[0]}', replace=True)
        if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
        return res
    def __setattr__(self, name, value):
        "Set an attribute for a `Callback`"
        # Warn on names that also exist on the learner: with GetAttr fall-through,
        # later reads would silently hit the callback's copy, not the learner's
        if hasattr(self.learn,name):
            warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
        super().__setattr__(name, value)
    @property
    def name(self):
        "Name of the `Callback`, camel-cased and with '*Callback*' removed"
        return class2attr(self, 'Callback')
# %% ../../nbs/13_callback.core.ipynb 34
class TrainEvalCallback(Callback):
    "`Callback` that tracks the number of iterations done and properly sets training/eval mode"
    order,run_valid = -10,False
    def after_create(self): self.learn.n_epoch = 1
    def before_fit(self):
        "Set the iter and epoch counters to 0, put the model and the right device"
        self.learn.epoch = 0
        self.learn.loss = tensor(0.)
        self.learn.train_iter = 0
        self.learn.pct_train = 0.
        device = getattr(self.dls, 'device', default_device())
        self.model.to(device)
        if isinstance(self.loss_func, (nn.Module, BaseLoss)): self.loss_func.to(device)
        if hasattr(self.model, 'reset'): self.model.reset()
    def after_batch(self):
        "Update the iter counter (in training mode)"
        self.learn.pct_train += 1./(self.n_iter*self.n_epoch)
        self.learn.train_iter += 1
    def before_train(self):
        "Set the model to training mode"
        self.learn.pct_train = self.epoch/self.n_epoch
        self.model.train()
        self.learn.training = True
    def before_validate(self):
        "Set the model to validation mode"
        self.model.eval()
        self.learn.training = False
# %% ../../nbs/13_callback.core.ipynb 38
# Register `TrainEvalCallback` as a default callback exactly once (guard against re-import)
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# %% ../../nbs/13_callback.core.ipynb 52
class GatherPredsCallback(Callback):
    "`Callback` that returns all predictions and targets, optionally `with_input` or `with_loss`"
    # attribute names included in this callback's saved state — presumably consumed
    # by the `Stateful` base; confirm against fastcore `Stateful`
    _stateattrs=('preds','targets','inputs','losses')
    def __init__(self,
        with_input:bool=False, # Whether to return inputs
        with_loss:bool=False, # Whether to return losses
        save_preds:Path=None, # Path to save predictions
        save_targs:Path=None, # Path to save targets
        with_preds:bool=True, # Whether to return predictions
        with_targs:bool=True, # Whether to return targets
        concat_dim:int=0, # Dimension to concatenate returned tensors
        pickle_protocol:int=2 # Pickle protocol used to save predictions and targets
    ):
        store_attr()
    def before_batch(self):
        "If `with_input`, detach batch inputs"
        if self.with_input: self.inputs.append((self.learn.to_detach(self.xb)))
    def before_validate(self):
        "Initialize containers"
        self.preds,self.targets = [],[]
        if self.with_input: self.inputs = []
        if self.with_loss: self.losses = []
    def after_batch(self):
        "Save predictions, targets and potentially losses"
        if not hasattr(self, 'pred'): return
        preds,targs = self.learn.to_detach(self.pred),self.learn.to_detach(self.yb)
        if self.with_preds: self.preds.append(preds)
        if self.with_targs: self.targets.append(targs)
        # optionally stream per-batch tensors to disk, one file per batch index
        if self.save_preds is not None:
            torch.save(preds, self.save_preds/str(self.iter), pickle_protocol=self.pickle_protocol)
        if self.save_targs is not None:
            torch.save(targs[0], self.save_targs/str(self.iter), pickle_protocol=self.pickle_protocol)
        if self.with_loss:
            bs = find_bs(self.yb)
            # keep one loss value per sample: average trailing dims if the loss wasn't reduced per item
            loss = self.loss if self.loss.numel() == bs else self.loss.view(bs,-1).mean(1)
            self.losses.append(self.learn.to_detach(loss))
    def after_validate(self):
        "Concatenate all recorded tensors"
        if not hasattr(self, 'preds'): return
        if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim))
        if self.with_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim))
        if self.with_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim))
        if self.with_loss: self.losses = to_concat(self.losses)
    def all_tensors(self) -> (Tensor, list):
        "Returns all recorded tensors in the order [inputs, preds, targets, losses]"
        res = [self.preds if self.with_preds else None, self.targets if self.with_targs else None]
        if self.with_input: res = [self.inputs] + res
        if self.with_loss: res.append(self.losses)
        return res
# %% ../../nbs/13_callback.core.ipynb 54
class FetchPredsCallback(Callback):
    "A callback to fetch predictions during the training loop"
    remove_on_fetch = True
    def __init__(self,
        ds_idx:int=1, # Index of dataset, 0 for train, 1 for valid, used if `dl` is not present
        dl:DataLoader=None, # `DataLoader` used for fetching `Learner` predictions
        with_input:bool=False, # Whether to return inputs in `GatherPredsCallback`
        with_decoded:bool=False, # Whether to return decoded predictions
        cbs:Callback|MutableSequence=None, # `Callback` to temporarily remove from `Learner`
        reorder:bool=True # Whether to sort prediction results
    ):
        self.cbs = L(cbs)
        store_attr('ds_idx,dl,with_input,with_decoded,reorder')
    def after_validate(self):
        "Fetch predictions from `Learner` without `self.cbs` and `remove_on_fetch` callbacks"
        # strip every callback flagged `remove_on_fetch` (including this one) while predicting
        removable = L(cb for cb in self.learn.cbs if getattr(cb, 'remove_on_fetch', False))
        with self.learn.removed_cbs(removable + self.cbs) as learn:
            self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
                with_input=self.with_input, with_decoded=self.with_decoded, inner=True, reorder=self.reorder)
| 9,371 | 48.851064 | 180 | py |
fastai | fastai-master/fastai/callback/channelslast.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/18c_callback.channelslast.ipynb.
# %% ../../nbs/18c_callback.channelslast.ipynb 1
from __future__ import annotations
from ..basics import *
from .fp16 import MixedPrecision
from torch.cuda.amp import GradScaler
# %% auto 0
__all__ = ['ChannelsLast']
# %% ../../nbs/18c_callback.channelslast.ipynb 7
class ChannelsLast(Callback):
    "Channels last training using PyTorch's Channels Last Memory Format (beta)"
    order = -1 # Needs to run before any model modification callbacks occur
    def before_fit(self):
        # Convert the model's weights to channels-last memory layout once, at the start of fit
        self.learn.model.to(memory_format=torch.channels_last)
# %% ../../nbs/18c_callback.channelslast.ipynb 9
@patch
@delegates(GradScaler)
def to_channelslast(self:Learner,
    to_fp16:bool=True, # Add `MixedPrecision` callback. Recommended for full channels last performance
    **kwargs
):
    "Set `Learner` and inputs to `channels_last` format and `MixedPrecision` by default"
    cl_missing = not hasattr(self, 'channels_last')
    # add mixed precision together with channels-last unless either is already present
    if cl_missing and to_fp16 and not hasattr(self, 'mixed_precision'):
        return self.add_cbs([ChannelsLast(), MixedPrecision(**kwargs)])
    if cl_missing:
        return self.add_cb(ChannelsLast())
# %% ../../nbs/18c_callback.channelslast.ipynb 10
@patch
def to_contiguous(self:Learner, to_fp32:bool=False):
    "Set `Learner` and inputs to `contiguous_format` (default format), optionally to single precision"
    self.model.to(memory_format=torch.contiguous_format)
    if not to_fp32:
        return self.remove_cb(ChannelsLast)
    # also drop mixed precision when returning to full fp32
    return self.remove_cbs([ChannelsLast, MixedPrecision])
| 1,614 | 37.452381 | 102 | py |
fastai | fastai-master/fastai/callback/captum.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/70c_callback.captum.ipynb.
# %% ../../nbs/70c_callback.captum.ipynb 3
from __future__ import annotations
import tempfile
from ..basics import *
# %% auto 0
__all__ = ['CaptumInterpretation']
# %% ../../nbs/70c_callback.captum.ipynb 6
from ipykernel import jsonutil
# %% ../../nbs/70c_callback.captum.ipynb 7
# Dirty hack as json_clean doesn't support CategoryMap type
_json_clean=jsonutil.json_clean
def json_clean(o):
    "Wrap the original `json_clean`, first converting a `CategoryMap` into a plain list of its items."
    if isinstance(o, CategoryMap): o = list(o.items)
    return _json_clean(o)
jsonutil.json_clean = json_clean
# %% ../../nbs/70c_callback.captum.ipynb 8
from captum.attr import IntegratedGradients,NoiseTunnel,GradientShap,Occlusion
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
# %% ../../nbs/70c_callback.captum.ipynb 16
class CaptumInterpretation():
    "Captum-based attribution helper for a fastai `Learner` (Integrated Gradients, Noise Tunnel, Occlusion)."
    def __init__(self,learn,cmap_name='custom blue',colors=None,N=256,methods=('original_image','heat_map'),
                 signs=("all", "positive"),outlier_perc=1):
        if colors is None: colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')]
        store_attr()
        self.dls,self.model = learn.dls,learn.model
        self.supported_metrics=['IG','NT','Occl']
    def get_baseline_img(self, img_tensor, baseline_type):
        "Build a baseline image on the dls device: 'zeros', 'uniform' noise, or a 'gauss' blend with the input."
        baseline_img = None
        if baseline_type=='zeros': baseline_img = img_tensor*0
        if baseline_type=='uniform': baseline_img = torch.rand(img_tensor.shape)
        if baseline_type=='gauss':
            baseline_img = (torch.rand(img_tensor.shape).to(self.dls.device)+img_tensor)/2
        return baseline_img.to(self.dls.device)
    def visualize(self,inp,metric='IG',n_steps=1000,baseline_type='zeros',nt_type='smoothgrad', strides=(3,4,4), sliding_window_shapes=(3,15,15)):
        "Compute attributions for `inp` with `metric` ('IG', 'NT' or 'Occl') and display them next to the image."
        if metric not in self.supported_metrics:
            raise Exception(f"Metric {metric} is not supported. Currently {self.supported_metrics} are only supported")
        # run `inp` through the dls transform pipelines to get one (encoded, decoded) item
        tls = L([TfmdLists(inp, t) for t in L(ifnone(self.dls.tfms,[None]))])
        inp_data=list(zip(*(tls[0],tls[1])))[0]
        enc_data,dec_data=self._get_enc_dec_data(inp_data)
        attributions=self._get_attributions(enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes)
        self._viz(attributions,dec_data,metric)
    def _viz(self,attributions,dec_data,metric):
        "Render the attribution maps with captum's multi-panel visualizer."
        default_cmap = LinearSegmentedColormap.from_list(self.cmap_name,self.colors, N=self.N)
        _ = viz.visualize_image_attr_multiple(np.transpose(attributions.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(dec_data[0].numpy(), (1,2,0)),
                                              methods=self.methods,
                                              cmap=default_cmap,
                                              show_colorbar=True,
                                              signs=self.signs,
                                              outlier_perc=self.outlier_perc, titles=[f'Original Image - ({dec_data[1]})', metric])
    def _get_enc_dec_data(self,inp_data):
        "Return the (encoded batch, decoded item) pair for a single transformed item."
        dec_data=self.dls.after_item(inp_data)
        enc_data=self.dls.after_batch(to_device(self.dls.before_batch(dec_data),self.dls.device))
        return(enc_data,dec_data)
    def _get_attributions(self,enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes):
        "Dispatch to the captum algorithm selected by `metric`; attributor objects are cached on `self`."
        baseline=self.get_baseline_img(enc_data[0],baseline_type)
        if metric == 'IG':
            self._int_grads = self._int_grads if hasattr(self,'_int_grads') else IntegratedGradients(self.model)
            # FIX: `n_steps` used to be hard-coded to 200 here, silently ignoring the caller's argument
            return self._int_grads.attribute(enc_data[0],baseline, target=enc_data[1], n_steps=n_steps)
        elif metric == 'NT':
            self._int_grads = self._int_grads if hasattr(self,'_int_grads') else IntegratedGradients(self.model)
            self._noise_tunnel= self._noise_tunnel if hasattr(self,'_noise_tunnel') else NoiseTunnel(self._int_grads)
            return self._noise_tunnel.attribute(enc_data[0].to(self.dls.device), n_samples=1, nt_type=nt_type, target=enc_data[1])
        elif metric == 'Occl':
            self._occlusion = self._occlusion if hasattr(self,'_occlusion') else Occlusion(self.model)
            return self._occlusion.attribute(enc_data[0].to(self.dls.device),
                                             strides = strides,
                                             target=enc_data[1],
                                             sliding_window_shapes=sliding_window_shapes,
                                             baselines=baseline)
# %% ../../nbs/70c_callback.captum.ipynb 26
@patch
def insights(x: CaptumInterpretation,inp_data,debug=True):
    "Launch the Captum Insights `AttributionVisualizer` on `inp_data`."
    _baseline_func= lambda o: o*0
    # bool vocab entries must be stringified for the visualizer's class list
    _get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
    dl = x.dls.test_dl(L(inp_data),with_labels=True, bs=4)
    normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
    # captum v0.3 expects tensors without the batch dimension.
    if nested_attr(normalize_func, 'mean.ndim', 4)==4: normalize_func.mean.squeeze_(0)
    if nested_attr(normalize_func, 'std.ndim', 4)==4: normalize_func.std.squeeze_(0)
    # NOTE(review): `x._formatted_data_iter` is defined elsewhere in this module — confirm it exists
    visualizer = AttributionVisualizer(
        models=[x.model],
        score_func=lambda o: torch.nn.functional.softmax(o, 1),
        classes=_get_vocab(dl.vocab),
        features=[ImageFeature("Image", baseline_transforms=[_baseline_func], input_transforms=[normalize_func])],
        dataset=x._formatted_data_iter(dl,normalize_func))
    visualizer.render(debug=debug)
| 5,807 | 49.947368 | 146 | py |
fastai | fastai-master/fastai/callback/tensorboard.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/70a_callback.tensorboard.ipynb.
# %% ../../nbs/70a_callback.tensorboard.ipynb 3
from __future__ import annotations
from ..basics import *
# %% auto 0
__all__ = ['TensorBoardBaseCallback', 'TensorBoardCallback', 'TensorBoardProjectorCallback', 'projector_word_embeddings',
'tensorboard_log']
# %% ../../nbs/70a_callback.tensorboard.ipynb 18
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from .fp16 import ModelToHalf
from .hook import hook_output
# %% ../../nbs/70a_callback.tensorboard.ipynb 19
class TensorBoardBaseCallback(Callback):
    "Base class for tensorboard callbacks"
    # FIX: this doc line used to sit *below* `order`, where it was a no-op string
    # statement rather than the class docstring
    order = Recorder.order+1
    def __init__(self): self.run_projector = False
    def after_pred(self):
        # while the projector is active, accumulate hooked features for each batch
        if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat)
    def after_validate(self):
        if not self.run_projector: return
        self.run_projector = False
        self._remove()
        _write_projector_embedding(self.learn, self.writer, self.feat)
    def after_fit(self):
        # close the SummaryWriter (flushes pending events) if this callback actually ran
        if self.run: self.writer.close()
    def _setup_projector(self):
        "Install an output hook on the layer whose activations feed the projector."
        self.run_projector = True
        # `self.layer` is set by subclasses via `store_attr`; default hooks model[1][1]
        self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer)
        self.feat = {}
    def _setup_writer(self):
        "Create the `SummaryWriter` used by all logging methods."
        self.writer = SummaryWriter(log_dir=self.log_dir)
    def __del__(self): self._remove()
    def _remove(self):
        "Detach the feature hook if one was installed."
        if getattr(self, 'h', None): self.h.remove()
# %% ../../nbs/70a_callback.tensorboard.ipynb 21
class TensorBoardCallback(TensorBoardBaseCallback):
    "Saves model topology, losses & metrics for tensorboard and tensorboard projector during training"
    def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None):
        super().__init__()
        store_attr()
    def before_fit(self):
        # only run for real training on the main process (not lr_find or get_preds)
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
        if not self.run: return
        self._setup_writer()
        if self.trace_model:
            if hasattr(self.learn, 'mixed_precision'):
                raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.")
            # trace one batch through the model to log its graph
            b = self.dls.one_batch()
            self.learn._split(b)
            self.writer.add_graph(self.model, *self.xb)
    def after_batch(self):
        self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)
        # log every hyper-parameter (lr, mom, ...) for each parameter group
        for i,h in enumerate(self.opt.hypers):
            for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)
    def after_epoch(self):
        # log the recorder's values; the [2:-1] slice skips bookkeeping columns — confirm exact columns
        for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):
            self.writer.add_scalar(n, v, self.train_iter)
        if self.log_preds:
            # run one validation batch and log decoded predictions as a figure
            b = self.dls.valid.one_batch()
            self.learn.one_batch(0, b)
            preds = getcallable(self.loss_func, 'activation')(self.pred)
            out = getcallable(self.loss_func, 'decodes')(preds)
            x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)
            tensorboard_log(x, y, its, outs, self.writer, self.train_iter)
    def before_validate(self):
        if self.projector: self._setup_projector()
# %% ../../nbs/70a_callback.tensorboard.ipynb 23
class TensorBoardProjectorCallback(TensorBoardBaseCallback):
    "Extracts and exports image features for tensorboard projector during inference"
    def __init__(self, log_dir=None, layer=None):
        super().__init__()
        store_attr()
    def before_fit(self):
        "Run only for inference (`gather_preds`) on the main process, outside lr_find"
        self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0
        if self.run: self._setup_writer()
    def before_validate(self):
        "Install the projector feature hook"
        self._setup_projector()
# %% ../../nbs/70a_callback.tensorboard.ipynb 25
def _write_projector_embedding(learn, writer, feat):
    "Write the accumulated feature vectors (with labels and thumbnail images) to the projector."
    vocab = getattr(learn.dl, 'vocab', None)
    lbls = [learn.dl.vocab[l] for l in feat['lbl']] if vocab else None
    vecs = feat['vec'].squeeze()
    writer.add_embedding(vecs, metadata=lbls, label_img=feat['img'], global_step=learn.train_iter)
# %% ../../nbs/70a_callback.tensorboard.ipynb 26
def _add_projector_features(learn, hook, feat):
    "Accumulate hooked activations, normalized images and (optional) labels into `feat`."
    img = _normalize_for_projector(learn.x)
    first_batch = learn.iter == 0
    if first_batch:
        feat['vec'],feat['img'] = hook.stored,img
    else:
        feat['vec'] = torch.cat((feat['vec'], hook.stored),0)
        feat['img'] = torch.cat((feat['img'], img),0)
    if getattr(learn.dl, 'vocab', None):
        feat['lbl'] = learn.y if first_batch else torch.cat((feat['lbl'], learn.y),0)
    return feat
# %% ../../nbs/70a_callback.tensorboard.ipynb 27
def _get_embeddings(model, layer):
layer = model[0].encoder if layer == None else layer
return layer.weight
# %% ../../nbs/70a_callback.tensorboard.ipynb 28
@typedispatch
def _normalize_for_projector(x:TensorImage):
    "Rescale each image in the batch to the 0-1 range via per-sample min-max normalization."
    img = x.clone()
    orig_shape = img.shape
    flat = img.view(x.size(0), -1)
    flat -= flat.min(1, keepdim=True)[0]
    flat /= flat.max(1, keepdim=True)[0]
    return flat.view(*orig_shape)
# %% ../../nbs/70a_callback.tensorboard.ipynb 29
from ..text.all import LMLearner, TextLearner
# %% ../../nbs/70a_callback.tensorboard.ipynb 30
def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None):
    "Extracts and exports word embeddings from language models embedding layers"
    if not layer:
        if isinstance(learn, LMLearner): layer = learn.model[0].encoder
        elif isinstance(learn, TextLearner): layer = learn.model[0].module.encoder
    emb = layer.weight
    # placeholder thumbnail per point: the projector wants a label image for each vector
    img = torch.full((len(emb),3,8,8), 0.7)
    vocab = learn.dls.vocab[0] if vocab is None else vocab  # FIX: identity check, not `== None`
    # suffix tokens with '_' so the projector UI doesn't treat numeric tokens as numbers
    vocab = [f'{x}_' for x in vocab]
    writer = SummaryWriter(log_dir=log_dir)
    # NOTE(review): when limit<0 the slice end is -1, which drops the final row — confirm intended
    end = start + limit if limit >= 0 else -1
    writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end])
    writer.close()
# %% ../../nbs/70a_callback.tensorboard.ipynb 32
from ..vision.data import *
# %% ../../nbs/70a_callback.tensorboard.ipynb 33
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
    # One axis per sample: draw input and target, then overlay the prediction
    # colored green when it matches the target and red otherwise
    fig,axs = get_grid(len(samples), return_fig=True)
    for i in range(2):
        axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
    axs = [r.show(ctx=c, color='green' if b==r else 'red')
            for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
    writer.add_figure('Sample results', fig, step)
# %% ../../nbs/70a_callback.tensorboard.ipynb 34
from ..vision.core import TensorPoint,TensorBBox
# %% ../../nbs/70a_callback.tensorboard.ipynb 35
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorImageBase|TensorPoint|TensorBBox, samples, outs, writer, step):
    "Log a grid of inputs (even axes) with targets then predictions overlaid (odd axes)."
    fig,axs = get_grid(len(samples), return_fig=True, double=True)
    for i in range(2):
        axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
    # FIX: the loop variable used to be named `x`, shadowing the `x` parameter
    for res in [samples,outs]:
        axs[1::2] = [b.show(ctx=c) for b,c in zip(res.itemgot(0),axs[1::2])]
    writer.add_figure('Sample results', fig, step)
| 7,323 | 41.33526 | 121 | py |
fastai | fastai-master/fastai/callback/mixup.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/19_callback.mixup.ipynb.
# %% ../../nbs/19_callback.mixup.ipynb 2
from __future__ import annotations
from ..basics import *
from torch.distributions.beta import Beta
# %% auto 0
__all__ = ['reduce_loss', 'MixHandler', 'MixUp', 'CutMix']
# %% ../../nbs/19_callback.mixup.ipynb 6
def reduce_loss(
    loss:Tensor,
    reduction:str='mean' # PyTorch loss reduction
)->Tensor:
    "Reduce the loss based on `reduction`"
    if reduction == 'mean': return loss.mean()
    if reduction == 'sum': return loss.sum()
    # any other value (e.g. 'none') leaves the per-element loss untouched
    return loss
# %% ../../nbs/19_callback.mixup.ipynb 7
class MixHandler(Callback):
    "A handler class for implementing `MixUp` style scheduling"
    run_valid = False
    def __init__(self,
        alpha:float=0.5 # Determine `Beta` distribution in range (0.,inf]
    ):
        self.distrib = Beta(tensor(alpha), tensor(alpha))
    def before_train(self):
        "Determine whether to stack y"
        # Losses flagged `y_int` presumably take integer targets that can't be linearly
        # mixed; in that case swap in `self.lf`, which mixes the *losses* instead
        self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
        if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
    def after_train(self):
        "Set the loss function back to the previous loss"
        if self.stack_y: self.learn.loss_func = self.old_lf
    def after_cancel_train(self):
        "If training is canceled, still set the loss function back"
        self.after_train()
    def after_cancel_fit(self):
        "If fit is canceled, still set the loss function back"
        self.after_train()
    def lf(self, pred, *yb):
        "lf is a loss function that applies the original loss function on both outputs based on `self.lam`"
        if not self.training: return self.old_lf(pred, *yb)
        # compute the un-reduced loss against shuffled (`yb1`) and original targets,
        # interpolate with `self.lam`, then apply the original reduction
        with NoneReduce(self.old_lf) as lf:
            loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
        return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
# %% ../../nbs/19_callback.mixup.ipynb 10
class MixUp(MixHandler):
    "Implementation of https://arxiv.org/abs/1710.09412"
    def __init__(self,
        alpha:float=.4 # Determine `Beta` distribution in range (0.,inf]
    ):
        super().__init__(alpha)
    def before_batch(self):
        "Blend xb and yb with another random item in a second batch (xb1,yb1) with `lam` weights"
        # one mixing weight per sample, drawn from Beta(alpha, alpha)
        lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device)
        # take max(lam, 1-lam) so each mixed sample is dominated by its own data
        lam = torch.stack([lam, 1-lam], 1)
        self.lam = lam.max(1)[0]
        # pair each sample with a random other sample of the same batch
        shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
        xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
        nx_dims = len(self.x.size())
        # inputs are always interpolated; targets only when the loss accepts mixed targets
        self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1)))
        if not self.stack_y:
            ny_dims = len(self.y.size())
            self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
# %% ../../nbs/19_callback.mixup.ipynb 21
class CutMix(MixHandler):
    "Implementation of https://arxiv.org/abs/1905.04899"
    def __init__(self,
        alpha:float=1. # Determine `Beta` distribution in range (0.,inf]
    ):
        super().__init__(alpha)
    def before_batch(self):
        "Add `rand_bbox` patches with size based on `lam` and location chosen randomly."
        bs, _, H, W = self.x.size()
        self.lam = self.distrib.sample((1,)).to(self.x.device)
        shuffle = torch.randperm(bs).to(self.x.device)
        xb1,self.yb1 = self.x[shuffle], tuple((self.y[shuffle],))
        x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
        # paste the shuffled batch's patch into the current batch in-place
        self.learn.xb[0][..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]
        # recompute lam as the exact fraction of the image left untouched
        self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H))
        if not self.stack_y:
            ny_dims = len(self.y.size())
            self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
    def rand_bbox(self,
        W:int, # Width bbox will be
        H:int, # Height bbox will be
        lam:Tensor # lambda sample from Beta distribution i.e tensor([0.3647])
    )->tuple: # Represents the top-left pixel location and the bottom-right pixel location
        "Give a bounding box location based on the size of the im and a weight"
        # side lengths scale with sqrt(1-lam) so the patch area is ~(1-lam)*W*H
        cut_rat = torch.sqrt(1. - lam).to(self.x.device)
        cut_w = torch.round(W * cut_rat).type(torch.long).to(self.x.device)
        cut_h = torch.round(H * cut_rat).type(torch.long).to(self.x.device)
        # uniform
        cx = torch.randint(0, W, (1,)).to(self.x.device)
        cy = torch.randint(0, H, (1,)).to(self.x.device)
        # clamp so the box stays inside the image (actual area may be smaller near edges)
        x1 = torch.clamp(cx - cut_w // 2, 0, W)
        y1 = torch.clamp(cy - cut_h // 2, 0, H)
        x2 = torch.clamp(cx + cut_w // 2, 0, W)
        y2 = torch.clamp(cy + cut_h // 2, 0, H)
        return x1, y1, x2, y2
| 4,833 | 42.160714 | 114 | py |
fastai | fastai-master/fastai/callback/schedule.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/14_callback.schedule.ipynb.
# %% ../../nbs/14_callback.schedule.ipynb 2
from __future__ import annotations
from ..basics import *
from .tracker import SaveModelCallback
# %% auto 0
__all__ = ['annealer', 'sched_lin', 'sched_cos', 'sched_no', 'sched_exp', 'SchedLin', 'SchedCos', 'SchedNo', 'SchedExp',
'SchedPoly', 'combine_scheds', 'combined_cos', 'ParamScheduler', 'LRFinder', 'valley', 'slide', 'minimum',
'steep', 'SuggestionMethod']
# %% ../../nbs/14_callback.schedule.ipynb 3
_all_ = ['SuggestionMethod']
# %% ../../nbs/14_callback.schedule.ipynb 8
class _Annealer:
    "Picklable partial application of a schedule function `f` with fixed `start`/`end` bounds."
    def __init__(self, f, start, end):
        store_attr('f,start,end')
    def __call__(self, pos):
        return self.f(self.start, self.end, pos)
# %% ../../nbs/14_callback.schedule.ipynb 9
def annealer(f):
    "Decorator to make `f` return itself partially applied."
    @functools.wraps(f)
    def _wrapper(start, end):
        return _Annealer(f, start, end)
    return _wrapper
# %% ../../nbs/14_callback.schedule.ipynb 11
#TODO Jeremy, make this pickle
#@annealer
#def SchedLin(start, end, pos): return start + pos*(end-start)
#@annealer
#def SchedCos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
#@annealer
#def SchedNo (start, end, pos): return start
#@annealer
#def SchedExp(start, end, pos): return start * (end/start) ** pos
#
#SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
#SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
#SchedNo .__doc__ = "Constant schedule function with `start` value"
#SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# %% ../../nbs/14_callback.schedule.ipynb 12
def sched_lin(start, end, pos):
    "Linearly interpolate from `start` to `end` at fraction `pos` (0-1)."
    return start + pos*(end-start)
def sched_cos(start, end, pos):
    "Cosine-anneal from `start` to `end` at fraction `pos` (0-1)."
    return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
def sched_no (start, end, pos):
    "Constant schedule: always return `start`."
    return start
def sched_exp(start, end, pos):
    "Exponentially interpolate from `start` to `end` at fraction `pos` (0-1)."
    return start * (end/start) ** pos

def SchedLin(start, end): return _Annealer(sched_lin, start, end)
def SchedCos(start, end): return _Annealer(sched_cos, start, end)
def SchedNo (start, end): return _Annealer(sched_no,  start, end)
def SchedExp(start, end): return _Annealer(sched_exp, start, end)

SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
SchedNo .__doc__ = "Constant schedule function with `start` value"
SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# %% ../../nbs/14_callback.schedule.ipynb 15
def SchedPoly(start, end, power):
    "Polynomial schedule (of `power`) function from `start` to `end`"
    def _sched(pos):
        return start + (end - start) * pos ** power
    return _sched
# %% ../../nbs/14_callback.schedule.ipynb 28
def combine_scheds(pcts, scheds):
    "Combine `scheds` according to `pcts` in one function"
    assert sum(pcts) == 1.
    pcts = tensor([0] + L(pcts))
    assert torch.all(pcts >= 0)
    # cumulative boundaries: pcts[i]..pcts[i+1] is the span covered by scheds[i]
    pcts = torch.cumsum(pcts, 0)
    pct_lim = len(pcts) - 2
    def _inner(pos):
        # pick the schedule whose span contains `pos` (clamped so pos==1. uses the last one)
        idx = min((pos >= pcts).nonzero().max(), pct_lim)
        # rescale pos to a 0-1 position within that schedule's span
        actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
        return scheds[idx](actual_pos.item())
    return _inner
# %% ../../nbs/14_callback.schedule.ipynb 33
def combined_cos(pct, start, middle, end):
    "Return a scheduler with cosine annealing from `start`→`middle` & `middle`→`end`"
    scheds = [SchedCos(start, middle), SchedCos(middle, end)]
    return combine_scheds([pct, 1-pct], scheds)
# %% ../../nbs/14_callback.schedule.ipynb 38
@docs
class ParamScheduler(Callback):
    "Schedule hyper-parameters according to `scheds`"
    order,run_valid = 60,False
    def __init__(self, scheds):
        self.scheds = scheds
    def before_fit(self):
        self.hps = {hp:[] for hp in self.scheds}
    def before_batch(self):
        self._update_val(self.pct_train)
    def _update_val(self, pct):
        # evaluate each schedule at the current training fraction and push into the optimizer
        for hp,sched in self.scheds.items(): self.opt.set_hyper(hp, sched(pct))
    def after_batch(self):
        for hp in self.scheds: self.hps[hp].append(self.opt.hypers[-1][hp])
    def after_fit(self):
        if hasattr(self.learn, 'recorder') and hasattr(self, 'hps'): self.recorder.hps = self.hps
    _docs = {"before_fit": "Initialize container for hyper-parameters",
             "before_batch": "Set the proper hyper-parameters in the optimizer",
             "after_batch": "Record hyper-parameters of this batch",
             "after_fit": "Save the hyper-parameters in the recorder if there is one"}
# %% ../../nbs/14_callback.schedule.ipynb 46
@patch
def fit_one_cycle(self:Learner, n_epoch, lr_max=None, div=25., div_final=1e5, pct_start=0.25, wd=None,
                  moms=None, cbs=None, reset_opt=False, start_epoch=0):
    "Fit `self.model` for `n_epoch` using the 1cycle policy."
    if self.opt is None: self.create_opt()
    # Seed the optimizer's lr hyper first so discriminative lrs expand per param group
    self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
    lr_max = np.array([h['lr'] for h in self.opt.hypers])
    # lr: warm up to lr_max over `pct_start`, then anneal to lr_max/div_final;
    # momentum follows the inverse shape given by `moms` (falls back to self.moms)
    scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
              'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
    self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 50
@patch
def plot_sched(self:Recorder, keys=None, figsize=None):
    "Plot the recorded hyper-parameter schedules (one subplot per hyper-parameter)."
    keys = self.hps.keys() if keys is None else L(keys)
    n = len(keys)
    rows, cols = (n + 1)//2, min(2, n)
    if figsize is None: figsize = (6*cols, 4*rows)
    _, axs = plt.subplots(rows, cols, figsize=figsize)
    axs = axs.flatten() if n > 1 else L(axs)
    for name, ax in zip(keys, axs):
        ax.plot(self.hps[name])
        ax.set_ylabel(name)
# %% ../../nbs/14_callback.schedule.ipynb 54
@patch
def fit_flat_cos(self:Learner, n_epoch, lr=None, div_final=1e5, pct_start=0.75, wd=None,
                 cbs=None, reset_opt=False, start_epoch=0):
    "Fit `self.model` for `n_epoch` at flat `lr` before a cosine annealing."
    if self.opt is None: self.create_opt()
    self.opt.set_hyper('lr', self.lr if lr is None else lr)
    lr = np.array([h['lr'] for h in self.opt.hypers])
    # Constant lr for the first `pct_start` of training, then cosine-anneal to lr/div_final
    scheds = {'lr': combined_cos(pct_start, lr, lr, lr/div_final)}
    # Bug fix: previously `start_epoch=0` was hard-coded here, silently discarding
    # the caller-supplied `start_epoch` and breaking training resumption.
    self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 57
@patch
def fit_sgdr(self:Learner, n_cycles, cycle_len, lr_max=None, cycle_mult=2, cbs=None, reset_opt=False, wd=None,
             start_epoch=0):
    "Fit `self.model` for `n_cycles` of `cycle_len` using SGDR."
    if self.opt is None: self.create_opt()
    self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
    lr_max = np.array([h['lr'] for h in self.opt.hypers])
    # Epochs form a geometric series: cycle_len * (1 + mult + mult^2 + ...)
    total_epochs = cycle_len * (cycle_mult**n_cycles-1)//(cycle_mult-1)
    fracs = [cycle_len * cycle_mult**i / total_epochs for i in range(n_cycles)]
    cycles = [SchedCos(lr_max, 0) for _ in range(n_cycles)]
    sched_dict = {'lr': combine_scheds(fracs, cycles)}
    self.fit(total_epochs, cbs=ParamScheduler(sched_dict)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 60
@patch
@delegates(Learner.fit_one_cycle)
def fine_tune(self:Learner, epochs, base_lr=2e-3, freeze_epochs=1, lr_mult=100,
              pct_start=0.3, div=5.0, **kwargs):
    "Fine tune with `Learner.freeze` for `freeze_epochs`, then with `Learner.unfreeze` for `epochs`, using discriminative LR."
    # Phase 1: train only the head (body frozen), almost all warm-up (pct_start=0.99)
    self.freeze()
    self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
    base_lr /= 2
    # Phase 2: unfreeze and train the whole model with discriminative lrs,
    # earliest layers at base_lr/lr_mult up to base_lr for the head
    self.unfreeze()
    self.fit_one_cycle(epochs, slice(base_lr/lr_mult, base_lr), pct_start=pct_start, div=div, **kwargs)
# %% ../../nbs/14_callback.schedule.ipynb 67
@docs
class LRFinder(ParamScheduler):
    "Training with exponentially growing learning rate"
    def __init__(self, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True):
        if num_it < 6: num_it = 6  # too few iterations give meaningless curves
        # A list of (start,end) pairs yields one exponential schedule per param group
        self.scheds = {'lr': [SchedExp(s, e) for (s,e) in zip(start_lr,end_lr)
                             ] if is_listy(start_lr) else SchedExp(start_lr, end_lr)}
        self.num_it,self.stop_div = num_it,stop_div
    def before_fit(self):
        super().before_fit()
        path = self.path/self.model_dir
        path.mkdir(parents=True, exist_ok=True)
        # Snapshot model weights to a temp dir so the mock training can be undone
        self.tmp_d = tempfile.TemporaryDirectory(dir=path)
        self.tmp_p = Path(self.tmp_d.name).stem
        self.learn.save(f'{self.tmp_p}/_tmp')
        self.best_loss = float('inf')
    # Progress is measured in iterations (not epochs) for the lr sweep
    def before_batch(self): self._update_val(self.train_iter/self.num_it)
    def after_batch(self):
        super().after_batch()
        if self.smooth_loss < self.best_loss: self.best_loss = self.smooth_loss
        # Stop early once the loss diverges (4x the best seen), or after num_it iterations
        if self.smooth_loss > 4*self.best_loss and self.stop_div: raise CancelFitException()
        if self.train_iter >= self.num_it: raise CancelFitException()
    def before_validate(self): raise CancelValidException()
    def after_fit(self):
        self.learn.opt.zero_grad() # Needed before detaching the optimizer for future fits
        # Restore the pre-sweep weights and clean up the temp snapshot
        tmp_f = self.path/self.model_dir/self.tmp_p/'_tmp.pth'
        if tmp_f.exists():
            self.learn.load(f'{self.tmp_p}/_tmp', with_opt=True)
            self.tmp_d.cleanup()
    _docs = {"before_fit": "Initialize container for hyper-parameters and save the model",
             "before_batch": "Set the proper hyper-parameters in the optimizer",
             "after_batch": "Record hyper-parameters of this batch and potentially stop training",
             "after_fit": "Save the hyper-parameters in the recorder if there is one and load the original model",
             "before_validate": "Skip the validation part of training"}
# %% ../../nbs/14_callback.schedule.ipynb 78
def valley(lrs:list, losses:list, num_it:int):
    "Suggests a learning rate from the longest valley and returns its index"
    n = len(losses)
    # chain[i] = length of the longest strictly-decreasing subsequence ending at i
    chain = [1] * n
    best_start, best_end = 0, 0
    for i in range(1, n):
        for j in range(i):
            if losses[i] < losses[j] and chain[i] < chain[j] + 1:
                chain[i] = chain[j] + 1
        if chain[best_end] < chain[i]:
            best_end = i
            best_start = best_end - chain[best_end]
    # Suggest a point roughly halfway through the middle third of the valley
    third = (best_end - best_start) / 3
    idx = best_start + int(third) + int(third/2)
    return float(lrs[idx]), (float(lrs[idx]), losses[idx])
# %% ../../nbs/14_callback.schedule.ipynb 81
def slide(lrs:list, losses:list, num_it:int, lr_diff:int=15, thresh:float=.005, adjust_value:float=1.):
    "Suggests a learning rate following an interval slide rule and returns its index"
    losses = to_np(losses)
    grads = np.gradient(losses)
    # Slide a window of width `lr_diff` leftwards from the end while the loss
    # slope across the window still changes by more than `thresh`
    right = -1
    left = right - lr_diff
    candidate = lrs[left]
    while (left >= -len(losses)) and (abs(grads[right] - grads[left]) > thresh):
        candidate = lrs[left]
        right -= 1
        left -= 1
    suggestion = float(candidate) * adjust_value
    # Interpolate the loss at the suggested lr (log-spaced) for plotting
    loss_at = np.interp(np.log10(suggestion), np.log10(lrs), losses)
    return suggestion, (suggestion, loss_at)
# %% ../../nbs/14_callback.schedule.ipynb 84
def minimum(lrs:list, losses:list, num_it:int):
    "Suggests a learning rate one-tenth the minimum before divergence and returns its index"
    lr_min = lrs[losses.argmin()].item()
    # Loss at the recorded lr closest to lr_min, used as the plot marker
    nearest = min(range(len(lrs)), key=lambda i: abs(lrs[i]-lr_min))
    return lr_min/10, (lr_min, losses[nearest])
# %% ../../nbs/14_callback.schedule.ipynb 86
def steep(lrs:list, losses:list, num_it:int) -> tuple:
    "Suggests a learning rate when the slope is the steepest and returns its index"
    # Fix: the previous annotation `-> (float, tuple)` built a tuple literal at
    # definition time — it is not a valid type; the function returns a 2-tuple.
    # Slope of the loss curve in log-lr space between consecutive recorded points
    grads = (losses[1:]-losses[:-1]) / (lrs[1:].log()-lrs[:-1].log())
    lr_steep = lrs[grads.argmin()].item()
    # Loss at the recorded lr closest to the suggestion, used as the plot marker
    loss_idx = losses[min(range(len(lrs)), key=lambda i: abs(lrs[i]-lr_steep))]
    return lr_steep, (lr_steep, loss_idx)
# %% ../../nbs/14_callback.schedule.ipynb 88
@patch
def plot_lr_find(self:Recorder, skip_end=5, return_fig=True, suggestions=None, nms=None, **kwargs):
    "Plot the result of an LR Finder test (won't work if you didn't do `learn.lr_find()` before)"
    # Drop the last `skip_end` points, where the loss typically explodes
    if skip_end == 0: lrs, losses = self.lrs, self.losses
    else:             lrs, losses = self.lrs[:-skip_end], self.losses[:-skip_end]
    fig, ax = plt.subplots(1, 1)
    ax.plot(lrs, losses)
    ax.set_xscale('log')
    ax.set_xlabel("Learning Rate")
    ax.set_ylabel("Loss")
    if suggestions:
        # Skip the first cycle color (used by the curve itself)
        palette = plt.rcParams['axes.prop_cycle'].by_key()['color'][1:]
        for (lr_val, loss_val), label, col in zip(suggestions, nms, palette):
            ax.plot(lr_val, loss_val, 'o', label=label, c=col)
        ax.legend(loc='best')
# %% ../../nbs/14_callback.schedule.ipynb 89
# Namespace class whose attributes (`Valley`, `Slide`, `Minimum`, `Steep`) are the
# suggestion functions above, giving tab-completion and typo-proofing.
mk_class("SuggestionMethod", **{o.__name__.capitalize():o for o in [valley,slide,minimum,steep]},
         doc="All possible suggestion methods as convenience attributes to get tab-completion and typo-proofing")
# %% ../../nbs/14_callback.schedule.ipynb 90
@patch
def lr_find(self:Learner, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True, show_plot=True, suggest_funcs=(SuggestionMethod.Valley)):
    "Launch a mock training to find a good learning rate and return suggestions based on `suggest_funcs` as a named tuple"
    # NOTE(review): `(SuggestionMethod.Valley)` has no trailing comma so it is NOT a
    # tuple; it only works because `tuplify` below wraps a bare callable.
    n_epoch = num_it//len(self.dls.train) + 1
    cb=LRFinder(start_lr=start_lr, end_lr=end_lr, num_it=num_it, stop_div=stop_div)
    with self.no_logging(): self.fit(n_epoch, cbs=cb)
    if suggest_funcs is not None:
        # Drop the noisy first 10% of points and the final 5 before analysing
        lrs, losses = tensor(self.recorder.lrs[num_it//10:-5]), tensor(self.recorder.losses[num_it//10:-5])
        # Truncate at the first NaN loss so the suggestion functions see clean data
        nan_idxs = torch.nonzero(torch.isnan(losses.view(-1)))
        if len(nan_idxs) > 0:
            drop_idx = min(nan_idxs)
            lrs = lrs[:drop_idx]
            losses = losses[:drop_idx]
        _suggestions, nms = [], []
        for func in tuplify(suggest_funcs):
            nms.append(func.__name__ if not isinstance(func, partial) else func.func.__name__) # deal with partials
            _suggestions.append(func(lrs, losses, num_it))
        SuggestedLRs = collections.namedtuple('SuggestedLRs', nms)
        lrs, pnts = [], []
        for lr, pnt in _suggestions:
            lrs.append(lr)
            pnts.append(pnt)
        if show_plot: self.recorder.plot_lr_find(suggestions=pnts, nms=nms)
        return SuggestedLRs(*lrs)
    elif show_plot: self.recorder.plot_lr_find()
| 14,363 | 44.6 | 136 | py |
fastai | fastai-master/fastai/callback/fp16.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/18_callback.fp16.ipynb.
# %% ../../nbs/18_callback.fp16.ipynb 2
from __future__ import annotations
from ..basics import *
from .progress import *
from torch.cuda.amp import GradScaler,autocast
from torch.cuda.amp.grad_scaler import OptState
# %% auto 0
__all__ = ['MixedPrecision', 'FP16TestCallback', 'get_master', 'to_master_grads', 'to_model_params', 'test_overflow',
'grad_overflow', 'copy_clone', 'ModelToHalf', 'NonNativeMixedPrecision']
# %% ../../nbs/18_callback.fp16.ipynb 17
@delegates(GradScaler)
class MixedPrecision(Callback):
    "Mixed precision training using Pytorch's `autocast` and `GradScaler`"
    order = 10
    # kwargs are forwarded to `GradScaler` (init_scale, growth_interval, ...)
    def __init__(self, **kwargs): self.kwargs = kwargs
    def before_fit(self):
        self.autocast,self.learn.scaler,self.scales = autocast(),GradScaler(**self.kwargs),L()
    # The autocast context covers the forward pass and loss computation
    def before_batch(self): self.autocast.__enter__()
    def after_pred(self):
        # Cast fp16 predictions back to fp32 so downstream loss/metrics are stable
        if next(flatten(self.pred)).dtype==torch.float16: self.learn.pred = to_float(self.pred)
    def after_loss(self): self.autocast.__exit__(None, None, None)
    # Scale the loss so small fp16 gradients don't underflow
    def before_backward(self): self.learn.loss_grad = self.scaler.scale(self.loss_grad)
    def before_step(self):
        "Use `self` as a fake optimizer. `self.skipped` will be set to True `after_step` if gradients overflow. "
        self.skipped=True
        # GradScaler.step only calls our fake `step` when the gradients are finite
        self.scaler.step(self)
        if self.skipped: raise CancelStepException()
        self.scales.append(self.scaler.get_scale())
    def after_step(self): self.learn.scaler.update()

    @property
    def param_groups(self):
        "Pretend to be an optimizer for `GradScaler`"
        return self.opt.param_groups
    def step(self, *args, **kwargs):
        "Fake optimizer step to detect whether this batch was skipped from `GradScaler`"
        self.skipped=False
    def after_fit(self): self.autocast,self.learn.scaler,self.scales = None,None,None
# %% ../../nbs/18_callback.fp16.ipynb 19
class FP16TestCallback(Callback):
    "Asserts that predictions are `float16` values"
    order = 9
    def after_pred(self):
        preds = listify(flatten(self.pred))
        assert preds[0].dtype == torch.float16
# %% ../../nbs/18_callback.fp16.ipynb 22
@patch
@delegates(GradScaler)
def to_fp16(self:Learner, **kwargs): return self.add_cb(MixedPrecision(**kwargs))  # enable AMP training
# %% ../../nbs/18_callback.fp16.ipynb 23
@patch
def to_fp32(self:Learner): return self.remove_cb(MixedPrecision)  # back to full precision
# %% ../../nbs/18_callback.fp16.ipynb 26
from ..fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params
# %% ../../nbs/18_callback.fp16.ipynb 32
from torch.nn.utils import parameters_to_vector
# %% ../../nbs/18_callback.fp16.ipynb 33
def get_master(
    opt:Optimizer, # Optimizer from which to retrieve model params
    flat_master:bool=False, # Flatten fp32 params into a vector for better performance
) -> list: # List of fp16 params, and list of fp32 params
    "Creates fp16 model params given an initialized `Optimizer`, also returning fp32 model params. "
    # Keep only trainable params that actually carry data, per param group
    model_params = [[param for param in pg if getattr(param, 'requires_grad', False) and hasattr(param, 'data')] for pg in opt.param_lists]
    if flat_master:
        # One flattened fp32 master parameter per group
        master_params = []
        for pg in model_params:
            mp = parameters_to_vector([param.data.float() for param in pg])
            mp = nn.Parameter(mp, requires_grad=True)
            # NOTE(review): `mp.new(*mp.size())` allocates UNinitialized memory; it is
            # only a placeholder overwritten before use — confirm before relying on it
            if mp.grad is None: mp.grad = mp.new(*mp.size())
            master_params.append([mp])
    else:
        # One detached fp32 clone per model parameter
        master_params = [[nn.Parameter(param.data.clone().float().detach(), requires_grad=True) for param in pg] for pg in model_params]
    return model_params, master_params
# %% ../../nbs/18_callback.fp16.ipynb 38
def to_master_grads(
    model_pgs:list, # Fp16 model parameters to copy gradients from
    master_pgs:list, # Fp32 model parameters to copy gradients to
    flat_master:bool=False, # Whether or not fp32 parameters were previously flattened
):
    "Move fp16 model gradients to fp32 master gradients"
    # Thin per-group wrapper over fastai's fp16_utils helper
    for (model_params,master_params) in zip(model_pgs,master_pgs):
        model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)
# %% ../../nbs/18_callback.fp16.ipynb 42
def to_model_params(
    model_pgs:list, # Fp16 model params to copy to
    master_pgs:list, # Fp32 master params to copy from
    flat_master:bool=False # Whether master_pgs was previously flattened
)->None:
    "Copy updated fp32 master params to fp16 model params after gradient step. "
    # Thin per-group wrapper over fastai's fp16_utils helper
    for (model_params,master_params) in zip(model_pgs,master_pgs):
        master_params_to_model_params(model_params, master_params, flat_master=flat_master)
# %% ../../nbs/18_callback.fp16.ipynb 47
def test_overflow(x:torch.Tensor):
"Tests whether fp16 gradients have overflown."
s = float(x.float().sum())
return (s == float('inf') or s == float('-inf') or s != s)
# %% ../../nbs/18_callback.fp16.ipynb 50
def grad_overflow(pgs:list)->bool:
    "Tests all fp16 parameters in pgs for gradient overflow"
    # Short-circuits on the first overflowing gradient, like the original loop
    return any(test_overflow(p.grad.data)
               for pg in pgs for p in pg if p.grad is not None)
# %% ../../nbs/18_callback.fp16.ipynb 53
def copy_clone(d):
    "Detached fp32 clones of every tensor value in `d`; non-tensor values pass through."
    out = {}
    for k, v in d.items():
        out[k] = v.detach().clone().float() if isinstance(v, Tensor) else v
    return out
# %% ../../nbs/18_callback.fp16.ipynb 54
def _copy_state(opt, pgs1, pgs2):
    # Re-point the optimizer at `pgs2` and migrate per-parameter state from the
    # old params (`pgs1`) to the new ones, cloning tensors to fp32 on the way.
    opt.param_lists = pgs2
    for pg1,pg2 in zip(pgs1, pgs2):
        for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))
# %% ../../nbs/18_callback.fp16.ipynb 55
class ModelToHalf(Callback):
    "Use with NonNativeMixedPrecision callback (but it needs to run at the very beginning)"
    order=-50  # very early, so the model is fp16 before other callbacks see it
    def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)
    def after_fit (self): self.learn.model = convert_network(self.model, dtype=torch.float32)
# %% ../../nbs/18_callback.fp16.ipynb 56
@docs
class NonNativeMixedPrecision(Callback):
    "Run training in mixed precision"
    order=10
    def __init__(self,
        loss_scale:int=512, # Non-dynamic loss scale, used to avoid underflow of gradients.
        flat_master:bool=False, # Whether to flatten fp32 parameters for performance
        dynamic:bool=True, # Whether to automatically determine loss scaling
        max_loss_scale:float=2.**24, # Starting value for dynamic loss scaling
        div_factor:float=2., # Divide by this on overflow, multiply by this after scale_wait batches
        scale_wait:int=500, # Number of batches to wait for increasing loss scale
        clip:float=None, # Value to clip gradients at, max_norm, as in `nn.utils.clip_grad_norm_`
    ):
        assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."
        self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale
        self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip
        self.loss_scale = max_loss_scale if dynamic else loss_scale

    def before_fit(self):
        assert self.dls.device.type == 'cuda', "Mixed-precision training requires a GPU, remove the call `to_fp16`"
        if self.learn.opt is None: self.learn.create_opt()
        # Two copies of the parameters: fp16 in the model, fp32 masters for the update
        self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)
        self.old_pgs = self.opt.param_lists
        #Changes the optimizer so that the optimization step is done in FP32.
        _copy_state(self.learn.opt, self.model_pgs, self.master_pgs)
        if self.dynamic: self.count = 0

    def before_batch(self): self.learn.xb = to_half(self.xb)
    def after_pred(self): self.learn.pred = to_float(self.pred)
    # Scale the loss so small fp16 gradients don't underflow in backward
    def before_backward(self): self.learn.loss_grad *= self.loss_scale

    def before_step(self):
        #First, check for an overflow
        if self.dynamic and grad_overflow(self.model_pgs):
            # Overflow: shrink the scale and skip this batch's step entirely
            self.loss_scale /= self.div_factor
            self.learn.loss_grad /= self.div_factor #to record correct loss
            self.model.zero_grad()
            raise CancelBatchException() #skip step and zero_grad
        to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)
        # Unscale the gradients before the fp32 update
        for master_params in self.master_pgs:
            for param in master_params:
                if param.grad is not None: param.grad.div_(self.loss_scale)
        if self.clip is not None:
            for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)
        # Check if it's been long enough without overflow
        if self.dynamic:
            self.count += 1
            if self.count == self.scale_wait:
                self.count = 0
                self.loss_scale *= self.div_factor

    def after_step(self):
        self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)
        to_model_params(self.model_pgs, self.master_pgs, self.flat_master)

    def after_batch(self):
        if self.training: self.learn.loss_grad /= self.loss_scale  #Log correct loss
    def after_fit(self):
        if not hasattr(self,'master_pgs'): return
        # Restore the optimizer's original (fp16) param lists and state
        _copy_state(self.learn.opt, self.master_pgs, self.model_pgs)
        self.learn.opt.param_lists = self.old_pgs
        delattr(self, "master_pgs")
        delattr(self, "model_pgs")
        delattr(self, "old_pgs")

    _docs = dict(before_fit="Put the model in FP16 and prepare the two copies of the parameters",
                 before_batch="Put the input in FP16",
                 after_pred="Put the output back to FP32 so that the loss is computed in FP32",
                 before_backward="Apply loss scaling to avoid gradient underflow",
                 before_step="Update and apply dynamic loss scaling, move gradients to fp32, apply gradient clipping",
                 after_step="Zero fp16 grads and update fp16 params with fp32 params. ",
                 after_batch="Ensure loss is logged correctly",
                 after_fit="Put the model back in FP32")
# %% ../../nbs/18_callback.fp16.ipynb 60
@patch
@delegates(NonNativeMixedPrecision.__init__)
def to_non_native_fp16(self:Learner, **kwargs): return self.add_cbs([ModelToHalf(), NonNativeMixedPrecision(**kwargs)])  # legacy (pre-AMP) fp16
# %% ../../nbs/18_callback.fp16.ipynb 63
@patch
def to_non_native_fp32(self: Learner): return self.remove_cbs([ModelToHalf, NonNativeMixedPrecision])  # undo legacy fp16
| 10,319 | 46.33945 | 139 | py |
fastai | fastai-master/fastai/vision/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/07_vision.core.ipynb.
# %% ../../nbs/07_vision.core.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
from PIL import Image
try: BILINEAR,NEAREST = Image.Resampling.BILINEAR,Image.Resampling.NEAREST
except AttributeError: from PIL.Image import BILINEAR,NEAREST
# %% auto 0
__all__ = ['imagenet_stats', 'cifar_stats', 'mnist_stats', 'OpenMask', 'TensorPointCreate', 'to_image', 'load_image',
'image2tensor', 'PILBase', 'PILImage', 'PILImageBW', 'PILMask', 'AddMaskCodes', 'TensorPoint',
'get_annotations', 'TensorBBox', 'LabeledBBox', 'encodes', 'PointScaler', 'BBoxLabeler', 'decodes',
'BILINEAR', 'NEAREST', 'Image', 'ToTensor']
# %% ../../nbs/07_vision.core.ipynb 3
_all_ = ['BILINEAR','NEAREST']
# %% ../../nbs/07_vision.core.ipynb 6
_all_ = ['Image','ToTensor']
# %% ../../nbs/07_vision.core.ipynb 7
@patch
def __repr__(x:Image.Image):
    # Compact repr for PIL images: "<PIL.Image.Image image mode=RGB size=WxH>"
    return "<%s.%s image mode=%s size=%dx%d>" % (x.__class__.__module__, x.__class__.__name__, x.mode, x.size[0], x.size[1])
# %% ../../nbs/07_vision.core.ipynb 10
# Per-channel (mean, std) normalization statistics for common datasets.
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # RGB, ImageNet
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])  # RGB, CIFAR
mnist_stats = ([0.131], [0.308])  # single channel, MNIST
# %% ../../nbs/07_vision.core.ipynb 12
# Wrap `Image.size` so it returns a `fastuple` (supports elementwise math).
# The `_patched` flag guards against wrapping twice on module re-import.
if not hasattr(Image,'_patched'):
    _old_sz = Image.Image.size.fget
    @patch(as_prop=True)
    def size(x:Image.Image): return fastuple(_old_sz(x))
    Image._patched = True
# %% ../../nbs/07_vision.core.ipynb 13
@patch(as_prop=True)
def n_px(x: Image.Image): return x.size[0] * x.size[1]  # total number of pixels
# %% ../../nbs/07_vision.core.ipynb 15
@patch(as_prop=True)
def shape(x: Image.Image): return x.size[1],x.size[0]  # (h,w), matching tensor convention
# %% ../../nbs/07_vision.core.ipynb 17
@patch(as_prop=True)
def aspect(x: Image.Image): return x.size[0]/x.size[1]  # width / height
# %% ../../nbs/07_vision.core.ipynb 19
@patch
def reshape(x: Image.Image, h, w, resample=0):
    "`resize` `x` to `(w,h)`"
    # Takes (h,w) like tensors, but PIL's resize expects (w,h)
    return x.resize((w,h), resample=resample)
# %% ../../nbs/07_vision.core.ipynb 22
@patch
def to_bytes_format(im:Image.Image, format='png'):
    "Convert to bytes, default to PNG format"
    # Encode through an in-memory buffer rather than touching disk
    arr = io.BytesIO()
    im.save(arr, format=format)
    return arr.getvalue()
# %% ../../nbs/07_vision.core.ipynb 24
@patch
def to_thumb(self:Image.Image, h, w=None):
    "Same as `thumbnail`, but uses a copy"
    if w is None: w=h
    # `thumbnail` mutates in place, so work on a copy to keep `self` intact
    im = self.copy()
    im.thumbnail((w,h))
    return im
# %% ../../nbs/07_vision.core.ipynb 26
@patch
def resize_max(x: Image.Image, resample=0, max_px=None, max_h=None, max_w=None):
    "`resize` `x` to `max_px`, or `max_h`, or `max_w`"
    h,w = x.shape
    # Each cap is applied in turn, preserving the aspect ratio
    if max_px and x.n_px>max_px: h,w = fastuple(h,w).mul(math.sqrt(max_px/x.n_px))
    if max_h and h>max_h: h,w = (max_h ,max_h*w/h)
    if max_w and w>max_w: h,w = (max_w*h/w,max_w )
    return x.reshape(round(h), round(w), resample=resample)
# %% ../../nbs/07_vision.core.ipynb 31
def to_image(x):
    "Convert a tensor or array to a PIL int8 Image"
    if isinstance(x,Image.Image): return x
    # Tensors are CHW; PIL wants HWC
    if isinstance(x,Tensor): x = to_np(x.permute((1,2,0)))
    if x.dtype==np.float32: x = (x*255).astype(np.uint8)
    # Bug fix: channels are the *last* axis after the CHW->HWC permute above.
    # The old check `x.shape[0]==4` tested the image HEIGHT, so 4-channel data
    # never selected CMYK while height-4 RGB images wrongly did.
    return Image.fromarray(x, mode=['RGB','CMYK'][x.shape[-1]==4])
# %% ../../nbs/07_vision.core.ipynb 32
def load_image(fn, mode=None):
    "Open and load a `PIL.Image` and convert to `mode`"
    im = Image.open(fn)
    im.load()
    # NOTE(review): `_new(im.im)` rebuilds the image from its loaded core,
    # presumably to detach it from the underlying file — PIL-internal API, verify
    im = im._new(im.im)
    return im.convert(mode) if mode else im
# %% ../../nbs/07_vision.core.ipynb 33
def image2tensor(img):
    "Transform image to byte tensor in `c*h*w` dim order."
    t = tensor(img)
    # Grayscale images come back as (h,w); give them a trailing channel axis
    if t.dim() == 2: t = t.unsqueeze(-1)
    # HWC -> CHW
    return t.permute(2, 0, 1)
# %% ../../nbs/07_vision.core.ipynb 34
class PILBase(Image.Image, metaclass=BypassNewMeta):
    "Base class for a Pillow `Image` that can show itself and convert to a Tensor"
    # BypassNewMeta: `cls(im)` re-types an existing `Image.Image` instead of constructing
    _bypass_type=Image.Image
    _show_args = {'cmap':'viridis'}
    _open_args = {'mode': 'RGB'}
    @classmethod
    def create(cls, fn:Path|str|Tensor|ndarray|bytes|Image.Image, **kwargs):
        "Return an Image from `fn`"
        # Normalize each accepted input kind step by step until it is openable
        if isinstance(fn,TensorImage): fn = fn.permute(1,2,0).type(torch.uint8)  # CHW -> HWC
        if isinstance(fn,TensorMask): fn = fn.type(torch.uint8)
        if isinstance(fn,Tensor): fn = fn.numpy()
        if isinstance(fn,ndarray): return cls(Image.fromarray(fn))
        if isinstance(fn,bytes): fn = io.BytesIO(fn)
        if isinstance(fn,Image.Image): return cls(fn)
        return cls(load_image(fn, **merge(cls._open_args, kwargs)))
    def show(self, ctx=None, **kwargs):
        "Show image using `merge(self._show_args, kwargs)`"
        return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))
    def __repr__(self): return f'{self.__class__.__name__} mode={self.mode} size={"x".join([str(d) for d in self.size])}'
# %% ../../nbs/07_vision.core.ipynb 38
class PILImage(PILBase):
    "A RGB Pillow `Image` that can show itself and converts to `TensorImage`"
    pass
# %% ../../nbs/07_vision.core.ipynb 39
class PILImageBW(PILImage):
    "A BW Pillow `Image` that can show itself and converts to `TensorImageBW`"
    # Grayscale: open in mode 'L' and display with a grayscale colormap
    _show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
# %% ../../nbs/07_vision.core.ipynb 48
class PILMask(PILBase):
    "A Pillow `Image` Mask that can show itself and converts to `TensorMask`"
    # Masks: single channel, shown semi-transparent with a categorical colormap
    _open_args,_show_args = {'mode':'L'},{'alpha':0.5, 'cmap':'tab20'}
# %% ../../nbs/07_vision.core.ipynb 50
# Wrap mask creation in a `Transform` and attach the default segmentation loss,
# then make it the canonical `PILMask.create`.
OpenMask = Transform(PILMask.create)
OpenMask.loss_func = CrossEntropyLossFlat(axis=1)
PILMask.create = OpenMask
# %% ../../nbs/07_vision.core.ipynb 55
class AddMaskCodes(Transform):
    "Add the code metadata to a `TensorMask`"
    def __init__(self, codes=None):
        self.codes = codes
        # Expose the class names and count for downstream consumers (e.g. model head)
        if codes is not None: self.vocab,self.c = codes,len(codes)
    def decodes(self, o:TensorMask):
        # Attach the codes on decode so show methods can label classes
        if self.codes is not None: o.codes=self.codes
        return o
# %% ../../nbs/07_vision.core.ipynb 60
class TensorPoint(TensorBase):
    "Basic type for points in an image"
    _show_args = dict(s=10, marker='.', c='r')
    @classmethod
    def create(cls, t, img_size=None)->None:
        "Convert an array or a list of points `t` to a `Tensor`"
        # Stored as an (n,2) float tensor; `img_size` is kept as tensor metadata
        return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
    def show(self, ctx=None, **kwargs):
        if 'figsize' in kwargs: del kwargs['figsize']  # scatter doesn't accept figsize
        x = self.view(-1,2)
        ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
        return ctx
# %% ../../nbs/07_vision.core.ipynb 61
# Wrap point creation in a `Transform` and attach the default regression loss,
# then make it the canonical `TensorPoint.create`.
TensorPointCreate = Transform(TensorPoint.create)
TensorPointCreate.loss_func = MSELossFlat()
TensorPoint.create = TensorPointCreate
# %% ../../nbs/07_vision.core.ipynb 66
def get_annotations(fname, prefix=None):
    "Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
    # Use a context manager so the file handle is closed promptly
    # (the previous `json.load(open(fname))` leaked the handle).
    with open(fname) as f: annot_dict = json.load(f)
    id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
    classes = {o['id']:o['name'] for o in annot_dict['categories']}
    for o in annot_dict['annotations']:
        bb = o['bbox']
        # COCO boxes are [x,y,w,h]; convert to [x1,y1,x2,y2]
        id2bboxes[o['image_id']].append([bb[0],bb[1], bb[0]+bb[2], bb[1]+bb[3]])
        id2cats[o['image_id']].append(classes[o['category_id']])
    # Keep only images that actually have annotations
    id2images = {o['id']:('' if prefix is None else prefix) + o['file_name'] for o in annot_dict['images'] if o['id'] in id2bboxes}
    ids = list(id2images.keys())
    return [id2images[k] for k in ids], [(id2bboxes[k], id2cats[k]) for k in ids]
# %% ../../nbs/07_vision.core.ipynb 69
from matplotlib import patches, patheffects
# %% ../../nbs/07_vision.core.ipynb 70
def _draw_outline(o, lw):
    # Black stroke behind the artist so it stays visible on any background
    o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'), patheffects.Normal()])

def _draw_rect(ax, b, color='white', text=None, text_size=14, hw=True, rev=False):
    # `b` is (x, y, w, h) when hw=True, else (x1, y1, x2, y2); rev swaps axis order
    lx,ly,w,h = b
    if rev: lx,ly,w,h = ly,lx,h,w
    if not hw: w,h = w-lx,h-ly
    patch = ax.add_patch(patches.Rectangle((lx,ly), w, h, fill=False, edgecolor=color, lw=2))
    _draw_outline(patch, 4)
    if text is not None:
        patch = ax.text(lx,ly, text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
        _draw_outline(patch,1)
# %% ../../nbs/07_vision.core.ipynb 71
class TensorBBox(TensorPoint):
    "Basic type for a tensor of bounding boxes in an image"
    @classmethod
    def create(cls, x, img_size=None)->None: return cls(tensor(x).view(-1, 4).float(), img_size=img_size)  # (n,4) boxes
    def show(self, ctx=None, **kwargs):
        x = self.view(-1,4)
        # hw=False: boxes are stored as corner coordinates (x1,y1,x2,y2)
        for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
        return ctx
# %% ../../nbs/07_vision.core.ipynb 73
class LabeledBBox(L):
    "Basic type for a list of bounding boxes in an image"
    def show(self, ctx=None, **kwargs):
        for b,l in zip(self.bbox, self.lbl):
            # '#na#' marks padding boxes that should not be drawn
            if l != '#na#': ctx = retain_type(b, self.bbox).show(ctx=ctx, text=l)
        return ctx
    # self[0] -> bbox tensor, self[1] -> labels
    bbox,lbl = add_props(lambda i,self: self[i])
# %% ../../nbs/07_vision.core.ipynb 78
# Map each PIL type to the tensor type it converts to under `ToTensor`
PILImage  ._tensor_cls = TensorImage
PILImageBW._tensor_cls = TensorImageBW
PILMask   ._tensor_cls = TensorMask
# %% ../../nbs/07_vision.core.ipynb 79
@ToTensor
def encodes(self, o:PILBase): return o._tensor_cls(image2tensor(o))
@ToTensor
def encodes(self, o:PILMask): return o._tensor_cls(image2tensor(o)[0])  # drop the channel axis for masks
# %% ../../nbs/07_vision.core.ipynb 87
def _scale_pnts(y, sz, do_scale=True, y_first=False):
    # Map pixel coordinates to [-1,1] relative to image size `sz`;
    # `y_first` means incoming points are (y,x) and need flipping to (x,y)
    if y_first: y = y.flip(1)
    res = y * 2/tensor(sz).float() - 1 if do_scale else y
    return TensorPoint(res, img_size=sz)

# Inverse of `_scale_pnts`: [-1,1] back to pixel coordinates
def _unscale_pnts(y, sz): return TensorPoint((y+1) * tensor(sz).float()/2, img_size=sz)
# %% ../../nbs/07_vision.core.ipynb 88
class PointScaler(Transform):
    "Scale a tensor representing points"
    order = 1
    def __init__(self, do_scale=True, y_first=False):
        self.do_scale,self.y_first = do_scale,y_first
        # Robustness fix: `self.sz` was never initialized, so scaling a
        # `TensorPoint` before any image had passed through raised
        # AttributeError instead of falling back to the point's own `img_size`.
        self.sz = None
    def _grab_sz(self, x):
        # Remember the size of the last image seen; points are scaled against it
        self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
        return x
    def _get_sz(self, x): return getattr(x, 'img_size') if self.sz is None else self.sz
    def setups(self, dl):
        res = first(dl.do_item(None), risinstance(TensorPoint))
        if res is not None: self.c = res.numel()
    # The multiple encodes/decodes defs below are dispatched by input type
    # (fastai Transform TypeDispatch): images record their size, points get scaled.
    def encodes(self, x:PILBase|TensorImageBase): return self._grab_sz(x)
    def decodes(self, x:PILBase|TensorImageBase): return self._grab_sz(x)
    def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
    def decodes(self, x:TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
# %% ../../nbs/07_vision.core.ipynb 95
class BBoxLabeler(Transform):
    def setups(self, dl): self.vocab = dl.vocab
    def decode (self, x, **kwargs):
        # Reset pairing state so boxes/labels from a previous decode don't leak through
        self.bbox,self.lbls = None,None
        return self._call('decodes', x, **kwargs)
    # The two `decodes` defs below are dispatched by type (fastai TypeDispatch);
    # whichever arrives second pairs boxes with labels into a LabeledBBox.
    def decodes(self, x:TensorMultiCategory):
        self.lbls = [self.vocab[a] for a in x]
        return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
    def decodes(self, x:TensorBBox):
        self.bbox = x
        return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
# %% ../../nbs/07_vision.core.ipynb 96
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x  # pass through unchanged
# %% ../../nbs/07_vision.core.ipynb 97
# Boxes scale like points: view the (n,4) boxes as 2n (x,y) points, reuse the
# point scaling, then view back as boxes.
@PointScaler
def encodes(self, x:TensorBBox):
    pnts = self.encodes(cast(x.view(-1,2), TensorPoint))
    return cast(pnts.view(-1, 4), TensorBBox)

@PointScaler
def decodes(self, x:TensorBBox):
    pnts = self.decodes(cast(x.view(-1,2), TensorPoint))
    return cast(pnts.view(-1, 4), TensorBBox)
| 11,701 | 36.993506 | 124 | py |
fastai | fastai-master/fastai/vision/widgets.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/09c_vision.widgets.ipynb.
# %% ../../nbs/09c_vision.widgets.ipynb 3
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
from .core import *
from fastcore.parallel import *
from ipywidgets import HBox,VBox,widgets,Button,Checkbox,Dropdown,Layout,Box,Output,Label,FileUpload
# %% auto 0
__all__ = ['widget', 'carousel', 'ImagesCleaner', 'ImageClassifierCleaner', 'HBox', 'VBox', 'widgets', 'Button', 'Checkbox',
'Dropdown', 'Layout', 'Box', 'Output', 'Label', 'FileUpload']
# %% ../../nbs/09c_vision.widgets.ipynb 5
_all_ = ['HBox','VBox','widgets','Button','Checkbox','Dropdown','Layout','Box','Output','Label','FileUpload']
# %% ../../nbs/09c_vision.widgets.ipynb 7
@patch
def __getitem__(self:Box, i): return self.children[i]  # allow box[i] indexing of child widgets
# %% ../../nbs/09c_vision.widgets.ipynb 8
def widget(im, *args, **layout) -> Output:
    "Convert anything that can be `display`ed by `IPython` into a widget"
    o = Output(layout=merge(*args, layout))
    # Render `im` into the Output widget's captured display area
    with o: display(im)
    return o
# %% ../../nbs/09c_vision.widgets.ipynb 11
def _update_children(
    change:dict # A dictionary holding the information about the changed widget
):
    "Sets a value to the `layout` attribute on widget initialization and change"
    # 'flex: 0 0 auto' keeps each child at its natural size inside the carousel
    for o in change['owner'].children:
        if not o.layout.flex: o.layout.flex = '0 0 auto'
# %% ../../nbs/09c_vision.widgets.ipynb 12
def carousel(
    children:tuple|MutableSequence=(), # `Box` objects to display in a carousel
    **layout
) -> Box: # An `ipywidget`'s carousel
    "A horizontally scrolling carousel"
    def_layout = dict(overflow='scroll hidden', flex_flow='row', display='flex')
    res = Box([], layout=merge(def_layout, layout))
    # Observe first so the flex fix runs for the initial children assignment too
    res.observe(_update_children, names='children')
    res.children = children
    return res
# %% ../../nbs/09c_vision.widgets.ipynb 15
def _open_thumb(
    fn:Path|str, # A path of an image
    h:int, # Thumbnail Height
    w:int # Thumbnail Width
) -> Image: # `PIL` image to display
    "Opens an image path and returns the thumbnail of the image"
    # RGBA so images of any source mode display consistently in widgets
    return Image.open(fn).to_thumb(h, w).convert('RGBA')
# %% ../../nbs/09c_vision.widgets.ipynb 16
class ImagesCleaner:
    "A widget that displays all images in `fns` along with a `Dropdown`"
    def __init__(self,
        opts:tuple=(), # Options for the `Dropdown` menu
        height:int=128, # Thumbnail Height
        width:int=256, # Thumbnail Width
        max_n:int=30 # Max number of images to display
    ):
        # First two options are always keep/delete; `opts` adds relabel choices
        opts = ('<Keep>', '<Delete>')+tuple(opts)
        store_attr('opts,height,width,max_n')
        self.widget = carousel(width='100%')

    def set_fns(self,
        fns:list # Contains a path to each image
    ):
        "Sets a `thumbnail` and a `Dropdown` menu for each `VBox`"
        self.fns = L(fns)[:self.max_n]
        # NOTE(review): for fewer than 10 images `len//10` gives n_workers=0
        # (runs serially) — presumably intentional to avoid pool overhead; verify
        ims = parallel(_open_thumb, self.fns, h=self.height, w=self.width, progress=False,
                       n_workers=min(len(self.fns)//10,defaults.cpus))
        self.widget.children = [VBox([widget(im, height=f'{self.height}px'), Dropdown(
            options=self.opts, layout={'width': 'max-content'})]) for im in ims]

    def _ipython_display_(self): display(self.widget)
    def values(self) -> list:
        "Current values of `Dropdown` for each `VBox`"
        return L(self.widget.children).itemgot(1).attrgot('value')
    def delete(self) -> list:
        "Indices of items to delete"
        return self.values().argwhere(eq('<Delete>'))
    def change(self) -> list:
        "Tuples of the form (index of item to change, new class)"
        # Anything that is neither keep nor delete is a relabel request
        idxs = self.values().argwhere(not_(in_(['<Delete>','<Keep>'])))
        return idxs.zipwith(self.values()[idxs])
# %% ../../nbs/09c_vision.widgets.ipynb 20
def _get_iw_info(
    learn,
    ds_idx:int=0 # Index in `learn.dls`
) -> list:
    "For every image in `dls` `zip` it's `Path`, target and loss"
    # Un-shuffled, non-dropping copy so predictions line up with `dataset.items`
    dl = learn.dls[ds_idx].new(shuffle=False, drop_last=False)
    probs,targs,preds,losses = learn.get_preds(dl=dl, with_input=False, with_loss=True, with_decoded=True)
    # Decode integer targets to their vocab labels
    targs = [dl.vocab[t] for t in targs]
    return L([dl.dataset.items,targs,losses]).zip()
# %% ../../nbs/09c_vision.widgets.ipynb 21
@delegates(ImagesCleaner)
class ImageClassifierCleaner(GetAttr):
    "A widget that provides an `ImagesCleaner` for a CNN `Learner`"
    def __init__(self, learn, **kwargs):
        vocab = learn.dls.vocab
        # `default` makes unknown attribute access fall through to the inner ImagesCleaner
        self.default = self.iw = ImagesCleaner(vocab, **kwargs)
        self.dd_cats = Dropdown(options=vocab)
        self.dd_ds = Dropdown(options=('Train','Valid'))
        # Precompute (path, target, loss) for both train (0) and valid (1) sets
        self.iwis = _get_iw_info(learn,0),_get_iw_info(learn,1)
        self.dd_ds.observe(self.on_change_ds, 'value')
        self.dd_cats.observe(self.on_change_ds, 'value')
        self.on_change_ds()
        self.widget = VBox([self.dd_cats, self.dd_ds, self.iw.widget])
    def _ipython_display_(self): display(self.widget)
    def on_change_ds(self,change=None):
        "Toggle between training validation set view"
        # Filter the selected split to the selected class, show highest-loss items first
        info = L(o for o in self.iwis[self.dd_ds.index] if o[1]==self.dd_cats.value)
        self.iw.set_fns(info.sorted(2, reverse=True).itemgot(0))
| 5,186 | 40.830645 | 124 | py |
fastai | fastai-master/fastai/vision/augment.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/09_vision.augment.ipynb.
# %% ../../nbs/09_vision.augment.ipynb 3
from __future__ import annotations
from ..data.all import *
from .core import *
from .data import *
# %% auto 0
# Public API of this module (list maintained by nbdev)
__all__ = ['TensorTypes', 'RandTransform', 'FlipItem', 'DihedralItem', 'CropPad', 'RandomCrop', 'OldRandomCrop', 'Resize',
           'RandomResizedCrop', 'RatioResize', 'affine_grid', 'AffineCoordTfm', 'RandomResizedCropGPU', 'mask_tensor',
           'affine_mat', 'flip_mat', 'Flip', 'DeterministicDraw', 'DeterministicFlip', 'dihedral_mat', 'Dihedral',
           'DeterministicDihedral', 'rotate_mat', 'Rotate', 'zoom_mat', 'Zoom', 'find_coeffs', 'apply_perspective',
           'Warp', 'SpaceTfm', 'LightingTfm', 'Brightness', 'Contrast', 'grayscale', 'Saturation', 'rgb2hsv', 'hsv2rgb',
           'HSVTfm', 'Hue', 'cutout_gaussian', 'norm_apply_denorm', 'RandomErasing', 'setup_aug_tfms', 'aug_transforms',
           'PadMode', 'ResizeMethod']
# %% ../../nbs/09_vision.augment.ipynb 5
from torch import stack, zeros_like as t0, ones_like as t1
from torch.distributions.bernoulli import Bernoulli
# %% ../../nbs/09_vision.augment.ipynb 8
class RandTransform(DisplayedTransform):
    "A transform that before_call its state at each `__call__`"
    # `do` is recomputed per batch by `before_call`; `split_idx=0` => applied to train set only by default
    do,nm,supports,split_idx = True,None,[],0
    def __init__(self,
        p:float=1., # Probability of applying Transform
        nm:str=None,
        before_call:callable=None, # Optional batchwise preprocessing function
        **kwargs
    ):
        store_attr('p')
        super().__init__(**kwargs)
        # Caller-supplied `before_call` replaces the default state-setting logic
        self.before_call = ifnone(before_call,self.before_call)
    def before_call(self,
        b,
        split_idx:int, # Index of the train/valid dataset
    ):
        "This function can be overridden. Set `self.do` based on `self.p`"
        self.do = self.p==1. or random.random() < self.p
    def __call__(self,
        b,
        split_idx:int=None, # Index of the train/valid dataset
        **kwargs
    ):
        # Roll the dice first, then only dispatch to `encodes` when `self.do` is True
        self.before_call(b, split_idx=split_idx)
        return super().__call__(b, split_idx=split_idx, **kwargs) if self.do else b
# %% ../../nbs/09_vision.augment.ipynb 14
def _neg_axis(x, axis):
x[...,axis] = -x[...,axis]
return x
TensorTypes = (TensorImage,TensorMask,TensorPoint,TensorBBox)
# %% ../../nbs/09_vision.augment.ipynb 15
# Horizontal flip, type-dispatched: PIL images and tensor images flip pixels;
# points/bboxes negate the x coordinate (coords appear to be in a symmetric
# range, since plain negation suffices — verify against TensorPoint scaling)
@patch
def flip_lr(x:Image.Image): return x.transpose(Image.FLIP_LEFT_RIGHT)
@patch
def flip_lr(x:TensorImageBase): return x.flip(-1)
@patch
def flip_lr(x:TensorPoint): return TensorPoint(_neg_axis(x.clone(), 0))
@patch
# Bboxes flip as pairs of points, then re-pack to (x1,y1,x2,y2)
def flip_lr(x:TensorBBox): return TensorBBox(TensorPoint(x.view(-1,2)).flip_lr().view(-1,4))
# %% ../../nbs/09_vision.augment.ipynb 18
class FlipItem(RandTransform):
    "Randomly flip with probability `p`"
    def __init__(self, p:float=0.5): super().__init__(p=p)
    # Works on PIL images and all fastai tensor types via the `flip_lr` patches
    def encodes(self, x:(Image.Image,*TensorTypes)): return x.flip_lr()
# %% ../../nbs/09_vision.augment.ipynb 21
# Dihedral group of the square: k in 0..7 selects one of the 8 flip/rotation symmetries
@patch
def dihedral(x:PILImage,
    k:int, # Dihedral transformation to apply
    ):
    # PIL's transpose methods 0..6 map to k 1..7; k==0 is identity
    return x if k==0 else x.transpose(k-1)
@patch
def dihedral(x:TensorImage,
    k:int, # Dihedral transformation to apply
    ):
    # Compose horizontal flip, vertical flip and transpose per the membership tables
    if k in [1,3,4,7]: x = x.flip(-1)
    if k in [2,4,5,7]: x = x.flip(-2)
    if k in [3,5,6,7]: x = x.transpose(-1,-2)
    return x
@patch
def dihedral(x:TensorPoint,
    k:int, # Dihedral transformation to apply
    ):
    # Same symmetries on coordinates: negate x, negate y, swap axes
    if k in [1,3,4,7]: x = _neg_axis(x, 0)
    if k in [2,4,5,7]: x = _neg_axis(x, 1)
    if k in [3,5,6,7]: x = x.flip(1)
    return x
@patch
def dihedral(x:TensorBBox,
    k:int, #Dihedral transformation to apply
    ):
    # Transform the two corners, then rebuild (top-left, bottom-right) ordering
    pnts = TensorPoint(x.view(-1,2)).dihedral(k).view(-1,2,2)
    tl,br = pnts.min(dim=1)[0],pnts.max(dim=1)[0]
    return TensorBBox(torch.cat([tl, br], dim=1), img_size=x.img_size)
# %% ../../nbs/09_vision.augment.ipynb 22
class DihedralItem(RandTransform):
    "Randomly flip with probability `p`"
    def before_call(self, b, split_idx):
        super().before_call(b, split_idx)
        # Pick one of the 8 dihedral symmetries (0 = identity)
        self.k = random.randint(0,7)
    def encodes(self, x:(Image.Image,*TensorTypes)): return x.dihedral(self.k)
# %% ../../nbs/09_vision.augment.ipynb 27
from torchvision.transforms.functional import pad as tvpad
# %% ../../nbs/09_vision.augment.ipynb 28
# Enum-like class: PadMode.Zeros == 'zeros', etc.
mk_class('PadMode', **{o:o.lower() for o in ['Zeros', 'Border', 'Reflection']},
         doc="All possible padding mode as attributes to get tab-completion and typo-proofing")
# %% ../../nbs/09_vision.augment.ipynb 29
# Re-exported via nbdev
_all_ = ['PadMode']
# %% ../../nbs/09_vision.augment.ipynb 31
# Map fastai `PadMode` names to the `padding_mode` strings torchvision's `pad` expects
_pad_modes = {'zeros': 'constant', 'border': 'edge', 'reflection': 'reflect'}
@patch
def _do_crop_pad(x:Image.Image, sz, tl, orig_sz,
                 pad_mode=PadMode.Zeros, resize_mode=BILINEAR, resize_to=None):
    # Crop the part of the requested box that lies inside the image, then pad the rest
    if any(tl.ge(0)) or any(tl.add(sz).le(orig_sz)):
        # At least one dim is inside the image, so needs to be cropped
        c = tl.max(0)
        x = x.crop((*c, *tl.add(sz).min(orig_sz)))
    if any(tl.lt(0)) or any(tl.add(sz).ge(orig_sz)):
        # At least one dim is outside the image, so needs to be padded
        p = (-tl).max(0)
        f = (sz-orig_sz).add(tl).max(0)
        x = tvpad(x, (*p, *f), padding_mode=_pad_modes[pad_mode])
    if resize_to is not None: x = x.resize(resize_to, resize_mode)
    return x
@patch
def _do_crop_pad(x:TensorPoint, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
    #assert pad_mode==PadMode.Zeros,"Only zero padding is supported for `TensorPoint` and `TensorBBox`"
    # Points live in normalized coords, so cropping is a scale + shift of the coordinates
    orig_sz,sz,tl = map(FloatTensor, (orig_sz,sz,tl))
    return TensorPoint((x+1)*orig_sz/sz - tl*2/sz - 1, sz=sz if resize_to is None else resize_to)
@patch
def _do_crop_pad(x:TensorBBox, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
    # Treat each bbox as two points and reuse the point implementation
    bbox = TensorPoint._do_crop_pad(x.view(-1,2), sz, tl, orig_sz, pad_mode, resize_to).view(-1,4)
    return TensorBBox(bbox, img_size=x.img_size)
@patch
def crop_pad(x:TensorBBox|TensorPoint|Image.Image,
             sz:int|tuple, # Crop/pad size of input, duplicated if one value is specified
             tl:tuple=None, # Optional top-left coordinate of the crop/pad, if `None` center crop
             orig_sz:tuple=None, # Original size of input
             pad_mode:PadMode=PadMode.Zeros, # Fastai padding mode
             resize_mode=BILINEAR, # Pillow `Image` resize mode
             resize_to:tuple=None # Optional post crop/pad resize of input
            ):
    if isinstance(sz,int): sz = (sz,sz)
    orig_sz = fastuple(_get_sz(x) if orig_sz is None else orig_sz)
    # Default `tl` centers the crop box in the input
    sz,tl = fastuple(sz),fastuple(((_get_sz(x)-sz)//2) if tl is None else tl)
    return x._do_crop_pad(sz, tl, orig_sz=orig_sz, pad_mode=pad_mode, resize_mode=resize_mode, resize_to=resize_to)
# %% ../../nbs/09_vision.augment.ipynb 32
def _process_sz(size):
    "Normalize `size` to a `fastuple` in (width,height) order"
    if isinstance(size, int):
        size = (size, size)
    return fastuple(size[1], size[0])

def _get_sz(x):
    "Return the (width,height) of `x` (first element if `x` is a tuple)"
    if isinstance(x, tuple):
        x = x[0]
    # PIL images expose `.size`; tensors carry `img_size`/`sz` metadata or fall back to shape
    if not isinstance(x, Tensor):
        return fastuple(x.size)
    sz = getattr(x, 'img_size', getattr(x, 'sz', (x.shape[-1], x.shape[-2])))
    return fastuple(sz)
# %% ../../nbs/09_vision.augment.ipynb 33
@delegates()
class CropPad(DisplayedTransform):
    "Center crop or pad an image to `size`"
    order = 0
    def __init__(self,
        size:int|tuple, # Size to crop or pad to, duplicated if one value is specified
        pad_mode:PadMode=PadMode.Zeros, # A `PadMode`
        **kwargs
    ):
        size = _process_sz(size)
        store_attr()
        super().__init__(**kwargs)
    def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
        orig_sz = _get_sz(x)
        # Center the crop box; a negative top-left means padding happens instead
        tl = (orig_sz-self.size)//2
        return x.crop_pad(self.size, tl, orig_sz=orig_sz, pad_mode=self.pad_mode)
# %% ../../nbs/09_vision.augment.ipynb 42
@delegates()
class RandomCrop(RandTransform):
    "Randomly crop an image to `size`"
    split_idx,order = None,1
    def __init__(self,
        size:int|tuple, # Size to crop to, duplicated if one value is specified
        **kwargs
    ):
        size = _process_sz(size)
        store_attr()
        super().__init__(**kwargs)
    def before_call(self,
        b,
        split_idx:int # Index of the train/valid dataset
    ):
        "Randomly positioning crop if train dataset else center crop"
        self.orig_sz = _get_sz(b)
        if split_idx: self.tl = (self.orig_sz-self.size)//2
        else:
            wd = self.orig_sz[0] - self.size[0]
            hd = self.orig_sz[1] - self.size[1]
            # If the image is smaller than the crop, sample a negative offset (i.e. pad)
            w_rand = (wd, -1) if wd < 0 else (0, wd)
            h_rand = (hd, -1) if hd < 0 else (0, hd)
            self.tl = fastuple(random.randint(*w_rand), random.randint(*h_rand))
    def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
        return x.crop_pad(self.size, self.tl, orig_sz=self.orig_sz)
# %% ../../nbs/09_vision.augment.ipynb 44
class OldRandomCrop(CropPad):
    "Randomly crop an image to `size`"
    def before_call(self, b, split_idx):
        super().before_call(b, split_idx)
        # NOTE(review): relies on `self.orig_sz` and `self.cp_size`, which the `CropPad`
        # shown in this file does not set — presumably legacy; verify before reusing
        w,h = self.orig_sz
        if not split_idx: self.tl = (random.randint(0,w-self.cp_size[0]), random.randint(0,h-self.cp_size[1]))
# %% ../../nbs/09_vision.augment.ipynb 50
# Enum-like class: ResizeMethod.Squish == 'squish', etc.
mk_class('ResizeMethod', **{o:o.lower() for o in ['Squish', 'Crop', 'Pad']},
         doc="All possible resize method as attributes to get tab-completion and typo-proofing")
# %% ../../nbs/09_vision.augment.ipynb 51
# Re-exported via nbdev
_all_ = ['ResizeMethod']
# %% ../../nbs/09_vision.augment.ipynb 55
@delegates()
class Resize(RandTransform):
    "Resize image to `size` using `method`"
    # Fix: the docstring above was originally placed *after* this class-attribute
    # assignment, making it a no-op string and leaving `Resize.__doc__` empty (PEP 257)
    split_idx,mode,mode_mask,order = None,BILINEAR,NEAREST,1
    def __init__(self,
        size:int|tuple, # Size to resize to, duplicated if one value is specified
        method:ResizeMethod=ResizeMethod.Crop, # A `ResizeMethod`
        pad_mode:PadMode=PadMode.Reflection, # A `PadMode`
        resamples=(BILINEAR, NEAREST), # Pillow `Image` resamples mode, resamples[1] for mask
        **kwargs
    ):
        size = _process_sz(size)
        store_attr()
        super().__init__(**kwargs)
        self.mode,self.mode_mask = resamples
    def before_call(self,
        b,
        split_idx:int # Index of the train/valid dataset
    ):
        # Squish needs no random state; otherwise pick the crop/pad anchor (center for valid)
        if self.method==ResizeMethod.Squish: return
        self.pcts = (0.5,0.5) if split_idx else (random.random(),random.random())
    def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
        orig_sz = _get_sz(x)
        if self.method==ResizeMethod.Squish:
            return x.crop_pad(orig_sz, fastuple(0,0), orig_sz=orig_sz, pad_mode=self.pad_mode,
                   resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)

        w,h = orig_sz
        # Crop keeps the smaller scale factor (fills the target); Pad keeps the larger
        op = (operator.lt,operator.gt)[self.method==ResizeMethod.Pad]
        m = w/self.size[0] if op(w/self.size[0],h/self.size[1]) else h/self.size[1]
        cp_sz = (int(m*self.size[0]),int(m*self.size[1]))
        tl = fastuple(int(self.pcts[0]*(w-cp_sz[0])), int(self.pcts[1]*(h-cp_sz[1])))
        return x.crop_pad(cp_sz, tl, orig_sz=orig_sz, pad_mode=self.pad_mode,
                   resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)
# %% ../../nbs/09_vision.augment.ipynb 62
@delegates()
class RandomResizedCrop(RandTransform):
    "Picks a random scaled crop of an image and resize it to `size`"
    split_idx,order = None,1
    def __init__(self,
        size:int|tuple, # Final size, duplicated if one value is specified
        min_scale:float=0.08, # Minimum scale of the crop, in relation to image area
        ratio=(3/4, 4/3), # Range of width over height of the output
        resamples=(BILINEAR, NEAREST), # Pillow `Image` resample mode, resamples[1] for mask
        val_xtra:float=0.14, # The ratio of size at the edge cropped out in the validation set
        max_scale:float=1., # Maximum scale of the crop, in relation to image area
        **kwargs
    ):
        size = _process_sz(size)
        store_attr()
        super().__init__(**kwargs)
        self.mode,self.mode_mask = resamples
    def before_call(self,
        b,
        split_idx # Index of the train/valid dataset
    ):
        w,h = self.orig_sz = _get_sz(b)
        if split_idx:
            # Validation: resize a bit larger (rounded to a multiple of 8), center-crop later
            xtra = math.ceil(max(*self.size[:2])*self.val_xtra/8)*8
            self.final_size = (self.size[0]+xtra, self.size[1]+xtra)
            self.tl,self.cp_size = (0,0),self.orig_sz
            return
        self.final_size = self.size
        # Rejection-sample a crop box with random area and aspect ratio (10 tries)
        for attempt in range(10):
            area = random.uniform(self.min_scale, self.max_scale) * w * h
            ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
            nw = int(round(math.sqrt(area * ratio)))
            nh = int(round(math.sqrt(area / ratio)))
            if nw <= w and nh <= h:
                self.cp_size = (nw,nh)
                self.tl = random.randint(0,w-nw), random.randint(0,h - nh)
                return
        # Fallback: clamp the aspect ratio to the allowed range and center the crop
        if   w/h < self.ratio[0]: self.cp_size = (w, int(w/self.ratio[0]))
        elif w/h > self.ratio[1]: self.cp_size = (int(h*self.ratio[1]), h)
        else:                     self.cp_size = (w, h)
        self.tl = ((w-self.cp_size[0])//2, (h-self.cp_size[1])//2)
    def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
        res = x.crop_pad(self.cp_size, self.tl, orig_sz=self.orig_sz,
            resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.final_size)
        if self.final_size != self.size: res = res.crop_pad(self.size) #Validation set: one final center crop
        return res
# %% ../../nbs/09_vision.augment.ipynb 72
class RatioResize(DisplayedTransform):
    'Resizes the biggest dimension of an image to `max_sz` maintaining the aspect ratio'
    order = 1
    def __init__(self,
        max_sz: int, # Biggest dimension of the resized image
        resamples=(BILINEAR, NEAREST), # Pillow `Image` resample mode, resamples[1] for mask
        **kwargs
    ):
        store_attr()
        super().__init__(**kwargs)
    def encodes(self, x:Image.Image|TensorBBox|TensorPoint):
        w,h = _get_sz(x)
        # Scale the longer side to `max_sz`, keep the aspect ratio for the other side
        if w >= h: nw,nh = self.max_sz,h*self.max_sz/w
        else:      nw,nh = w*self.max_sz/h,self.max_sz
        # Delegate the actual resampling to `Resize` (note: size is (height,width) here)
        return Resize(size=(int(nh),int(nw)), resamples=self.resamples)(x)
# %% ../../nbs/09_vision.augment.ipynb 77
def _init_mat(x):
mat = torch.eye(3, device=x.device).float()
return mat.unsqueeze(0).expand(x.size(0), 3, 3).contiguous()
# %% ../../nbs/09_vision.augment.ipynb 80
def _grid_sample(x, coords, mode='bilinear', padding_mode='reflection', align_corners=None):
    "Resample pixels in `coords` from `x` by `mode`, with `padding_mode` in ('reflection','border','zeros')."
    #coords = coords.permute(0, 3, 1, 2).contiguous().permute(0, 2, 3, 1) # optimize layout for grid_sample
    if mode=='bilinear': # hack to get smoother downwards resampling
        mn,mx = coords.min(),coords.max()
        # max amount we're affine zooming by (>1 means zooming in)
        z = 1/(mx-mn).item()*2
        # amount we're resizing by, with 100% extra margin
        d = min(x.shape[-2]/coords.shape[-2], x.shape[-1]/coords.shape[-1])/2
        # If we're resizing up by >200%, and we're zooming less than that, interpolate first
        if d>1 and d>z:
            # area interpolation pre-shrinks `x` to avoid aliasing in grid_sample
            x = F.interpolate(x, scale_factor=1/d, mode='area', recompute_scale_factor=True)
    return F.grid_sample(x, coords, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 90
def affine_grid(
    theta:Tensor, # Batch of affine transformation matrices
    size:tuple, # Output size
    align_corners:bool=None # PyTorch `F.grid_sample` align_corners
):
    " Generates `TensorFlowField` from a transformation affine matrices `theta`"
    # Thin typed wrapper over `F.affine_grid`
    return TensorFlowField(F.affine_grid(theta, size, align_corners=align_corners))
# %% ../../nbs/09_vision.augment.ipynb 91
# `affine_coord` applies an affine matrix + optional coordinate transform, dispatched by type:
# images/masks are resampled through `grid_sample`; points/bboxes get the *inverse* transform
@patch
def affine_coord(x: TensorImage,
    mat:Tensor=None, # Batch of affine transformation matrices
    coord_tfm:callable=None, # Partial function of composable coordinate transforms
    sz:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `TensorImage`
    pad_mode=PadMode.Reflection, # Padding applied to `TensorImage`
    align_corners=True # PyTorch `F.grid_sample` align_corners
):
    "Apply affine and coordinate transforms to `TensorImage`"
    if mat is None and coord_tfm is None and sz is None: return x
    size = tuple(x.shape[-2:]) if sz is None else (sz,sz) if isinstance(sz,int) else tuple(sz)
    if mat is None: mat = _init_mat(x)[:,:2]
    coords = affine_grid(mat, x.shape[:2] + size, align_corners=align_corners)
    if coord_tfm is not None: coords = coord_tfm(coords)
    return TensorImage(_grid_sample(x, coords, mode=mode, padding_mode=pad_mode, align_corners=align_corners))
@patch
def affine_coord(x: TensorMask,
    mat:Tensor=None, # Batch of affine transformation matrices
    coord_tfm:callable=None, # Partial function of composable coordinate transforms
    sz:int|tuple=None, # Output size, duplicated if one value is specified
    mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorMask`
    pad_mode=PadMode.Reflection, # Padding applied to `TensorMask`
    align_corners=True # PyTorch `F.grid_sample` align_corners
):
    "Apply affine and coordinate transforms to `TensorMask`"
    # Masks need a channel dim for grid_sample; cast to float and back to long
    add_dim = (x.ndim==3)
    if add_dim: x = x[:,None]
    res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode, align_corners).long()
    if add_dim: res = res[:,0]
    return TensorMask(res)
@patch
def affine_coord(x: TensorPoint,
    mat:Tensor=None, # Batch of affine transformation matrices
    coord_tfm=None, # Partial function of composable coordinate transforms
    sz=None, # Output size, duplicated if one value is specified
    mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorPoint`
    pad_mode=PadMode.Zeros, # Padding applied to `TensorPoint`
    align_corners=True # PyTorch `F.grid_sample` align_corners
):
    "Apply affine and coordinate transforms to `TensorPoint`"
    #assert pad_mode==PadMode.Zeros, "Only zero padding is supported for `TensorPoint` and `TensorBBox`"
    if sz is None: sz = getattr(x, "img_size", None)
    if coord_tfm is not None: x = coord_tfm(x, invert=True)
    if mat is not None:
        # Invert the affine: subtract translation, multiply by the inverse linear part
        mat = TensorPoint(mat)
        x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))
    return TensorPoint(x, sz=sz)
@patch
def affine_coord(x: TensorBBox,
    mat=None, # Batch of affine transformation matrices
    coord_tfm=None, # Partial function of composable coordinate transforms
    sz=None, # Output size, duplicated if one value is specified
    mode='nearest', # PyTorch `F.grid_sample` interpolation applied to `TensorBBox`
    pad_mode=PadMode.Zeros, # Padding applied to `TensorBBox`
    align_corners=True # PyTorch `F.grid_sample` align_corners
):
    "Apply affine and coordinate transforms to `TensorBBox`"
    if mat is None and coord_tfm is None: return x
    if sz is None: sz = getattr(x, "img_size", None)
    bs,n = x.shape[:2]
    # Transform all 4 corners of each box, then take the min/max to re-axis-align it
    pnts = stack([x[...,:2], stack([x[...,0],x[...,3]],dim=2),
                  stack([x[...,2],x[...,1]],dim=2), x[...,2:]], dim=2)
    pnts = TensorPoint(pnts.view(bs, 4*n, 2), img_size=sz).affine_coord(mat, coord_tfm, sz, mode, pad_mode)
    pnts = pnts.view(bs, n, 4, 2)
    tl,dr = pnts.min(dim=2)[0],pnts.max(dim=2)[0]
    return TensorBBox(torch.cat([tl, dr], dim=2), img_size=sz)
# %% ../../nbs/09_vision.augment.ipynb 92
def _prepare_mat(x, mat):
h,w = getattr(x, 'img_size', x.shape[-2:])
mat[:,0,1] *= h/w
mat[:,1,0] *= w/h
return mat[:,:2]
# %% ../../nbs/09_vision.augment.ipynb 93
class AffineCoordTfm(RandTransform):
    "Combine and apply affine and coord transforms"
    order,split_idx = 30,None
    def __init__(self,
        aff_fs:callable|MutableSequence=None, # Affine transformations function for a batch
        coord_fs:callable|MutableSequence=None, # Coordinate transformations function for a batch
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        mode_mask='nearest', # Resample mode for mask
        align_corners=None, # PyTorch `F.grid_sample` align_corners
        **kwargs
    ):
        store_attr(but=['aff_fs','coord_fs'])
        super().__init__(**kwargs)
        self.aff_fs,self.coord_fs = L(aff_fs),L(coord_fs)
        self.cp_size = None if size is None else (size,size) if isinstance(size, int) else tuple(size)
    def before_call(self,
        b,
        split_idx, # Index of the train/valid dataset
    ):
        # Compute the composed affine matrix once per batch from a representative tensor
        while isinstance(b, tuple): b = b[0]
        self.split_idx = split_idx
        self.do,self.mat = True,self._get_affine_mat(b)
        for t in self.coord_fs: t.before_call(b)
    def compose(self, tfm):
        "Compose `self` with another `AffineCoordTfm` to only do the interpolation step once"
        # TODO: keep `name` up to date with the combination
        # TODO: have option to only show a subset of the attrs, e.g. for `Flip`
        self.aff_fs  += tfm.aff_fs
        self.coord_fs += tfm.coord_fs
    def _get_affine_mat(self, x):
        aff_m = _init_mat(x)
        # Validation set: identity matrix (no random augmentation)
        if self.split_idx: return _prepare_mat(x, aff_m)
        ms = [f(x) for f in self.aff_fs]
        ms = [m for m in ms if m is not None]
        # Chain all affine transforms by matrix multiplication
        for m in ms: aff_m = aff_m @ m
        return _prepare_mat(x, aff_m)
    def _encode(self, x, mode, reverse=False):
        # Points/bboxes pass reverse=True so coord transforms are applied inverted
        coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)
        return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode, align_corners=self.align_corners)
    def encodes(self, x:TensorImage): return self._encode(x, self.mode)
    def encodes(self, x:TensorMask):  return self._encode(x, self.mode_mask)
    def encodes(self, x:TensorPoint|TensorBBox): return self._encode(x, self.mode, reverse=True)
# %% ../../nbs/09_vision.augment.ipynb 104
class RandomResizedCropGPU(RandTransform):
    "Picks a random scaled crop of an image and resize it to `size`"
    split_idx,order = None,30
    def __init__(self,
        size, # Final size, duplicated if one value is specified
        min_scale=0.08, # Minimum scale of the crop, in relation to image area
        ratio=(3/4, 4/3), # Range of width over height of the output
        mode='bilinear', # PyTorch `F.grid_sample` interpolation
        valid_scale=1., # Scale of the crop for the validation set, in relation to image area
        max_scale=1., # Maximum scale of the crop, in relation to image area
        mode_mask='nearest', # Interpolation mode for `TensorMask`
        **kwargs
    ):
        if isinstance(size, int): size = (size,size)
        store_attr()
        super().__init__(**kwargs)
    def before_call(self, b, split_idx):
        self.do = True
        h,w = fastuple((b[0] if isinstance(b, tuple) else b).shape[-2:])
        # Rejection-sample a crop with random area/aspect ratio (train only, 10 tries)
        for attempt in range(10):
            if split_idx: break
            area = random.uniform(self.min_scale,self.max_scale) * w * h
            ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
            nw = int(round(math.sqrt(area * ratio)))
            nh = int(round(math.sqrt(area / ratio)))
            if nw <= w and nh <= h:
                self.cp_size = (nh,nw)
                self.tl = random.randint(0,h - nh),random.randint(0,w-nw)
                return
        # Fallback / validation: clamp aspect ratio, optionally shrink by `valid_scale`, center crop
        if   w/h < self.ratio[0]: self.cp_size = (int(w/self.ratio[0]), w)
        elif w/h > self.ratio[1]: self.cp_size = (h, int(h*self.ratio[1]))
        else:                     self.cp_size = (h, w)
        if split_idx: self.cp_size = (int(self.cp_size[0]*self.valid_scale), int(self.cp_size[1]*self.valid_scale))
        self.tl = ((h-self.cp_size[0])//2,(w-self.cp_size[1])//2)
    def _encode(self, x, mode):
        # Slice the crop on GPU, then resample to the target size via affine_coord
        x = x[...,self.tl[0]:self.tl[0]+self.cp_size[0], self.tl[1]:self.tl[1]+self.cp_size[1]]
        return x.affine_coord(sz=self.size, mode=mode)
    def encodes(self, x:TensorImage|TensorPoint|TensorBBox): return self._encode(x, self.mode)
    def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
# %% ../../nbs/09_vision.augment.ipynb 110
def mask_tensor(
    x:Tensor, # Input `Tensor`
    p=0.5, # Probability of not applying mask
    neutral=0., # Mask value
    batch=False # Apply identical mask to entire batch
):
    "Mask elements of `x` with `neutral` with probability `1-p`"
    if p==1.: return x
    if batch:
        # One coin flip for the whole batch: keep `x` or replace everything by `neutral`
        return x if random.random() < p else x.new_zeros(*x.size()) + neutral
    # Shift so the neutral value becomes 0, zero-out per-element, shift back
    if neutral != 0: x.add_(-neutral)
    # Extra casting to float and long to prevent crashes on mps accelerator (issue #3911)
    keep = x.new_empty(*x.size()).float().bernoulli_(p).long()
    x.mul_(keep)
    return x.add_(neutral) if neutral != 0 else x
# %% ../../nbs/09_vision.augment.ipynb 117
def _draw_mask(x, def_draw, draw=None, p=0.5, neutral=0., batch=False):
    "Creates mask_tensor based on `x` with `neutral` with probability `1-p`. "
    # `draw` may be a callable, a per-item list, or a scalar; default to `def_draw`
    if draw is None: draw=def_draw
    if callable(draw): res=draw(x)
    elif is_listy(draw):
        # Need at least one drawn value per batch element
        assert len(draw)>=x.size(0)
        res = tensor(draw[:x.size(0)], dtype=x.dtype, device=x.device)
    else: res = x.new_zeros(x.size(0)) + draw
    return TensorBase(mask_tensor(res, p=p, neutral=neutral, batch=batch))
# %% ../../nbs/09_vision.augment.ipynb 126
def affine_mat(*ms):
    "Restructure length-6 vector `ms` into an affine matrix with 0,0,1 in the last line"
    rows = [stack([ms[0], ms[1], ms[2]], dim=1),
            stack([ms[3], ms[4], ms[5]], dim=1),
            # Homogeneous bottom row [0, 0, 1], broadcast to the batch size
            stack([t0(ms[0]), t0(ms[0]), t1(ms[0])], dim=1)]
    return stack(rows, dim=1)
# %% ../../nbs/09_vision.augment.ipynb 132
def flip_mat(
    x:Tensor, # The input Tensor
    p=0.5, # Probability of applying transformation
    draw:int|MutableSequence|callable=None, # Custom flips instead of random
    batch:bool=False # Apply identical flip to entire batch
):
    "Return a random flip matrix"
    def _def_draw(x): return x.new_ones(x.size(0))
    # mask is +1 (no flip) or -1 (flip): 1 - 2*{0,1}
    mask = x.new_ones(x.size(0)) - 2*_draw_mask(x, _def_draw, draw=draw, p=p, batch=batch)
    return affine_mat(mask,     t0(mask), t0(mask),
                      t0(mask), t1(mask), t0(mask))
# %% ../../nbs/09_vision.augment.ipynb 136
def _get_default(x, mode=None, pad_mode=None):
    "Resolve default `mode`/`pad_mode` for `x` and return a representative tensor with them"
    # NOTE(review): the original ternary read
    # `'bilinear' if isinstance(x, TensorMask) else 'bilinear'` — both branches
    # identical, so it is simplified to a plain assignment (behavior unchanged)
    if mode is None: mode = 'bilinear'
    # Points/bboxes must be zero-padded; pixel data defaults to reflection padding
    if pad_mode is None: pad_mode = PadMode.Zeros if isinstance(x, (TensorPoint, TensorBBox)) else PadMode.Reflection
    # When `x` is a tuple of tensors, use the first element as the representative
    x0 = x[0] if isinstance(x, tuple) else x
    return x0,mode,pad_mode
# %% ../../nbs/09_vision.augment.ipynb 139
@patch
def flip_batch(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
    p=0.5, # Probability of applying flip
    draw:int|MutableSequence|callable=None, # Custom flips instead of random
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode=None, # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=None, # Padding applied to `x`
    align_corners=True, # PyTorch `F.grid_sample` align_corners
    batch=False # Apply identical flip to entire batch
):
    # Batch-level horizontal flip implemented as an affine transform
    x0,mode,pad_mode = _get_default(x, mode, pad_mode)
    mat=flip_mat(x0, p=p, draw=draw, batch=batch)
    return x.affine_coord(mat=mat[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 141
class Flip(AffineCoordTfm):
    "Randomly flip a batch of images with a probability `p`"
    def __init__(self,
        p=0.5, # Probability of applying flip
        draw:int|MutableSequence|callable=None, # Custom flips instead of random
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        align_corners=True, # PyTorch `F.grid_sample` align_corners
        batch=False # Apply identical flip to entire batch
    ):
        # `flip_mat` supplies the per-batch affine matrices; interpolation happens in the parent
        aff_fs = partial(flip_mat, p=p, draw=draw, batch=batch)
        super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners, p=p)
# %% ../../nbs/09_vision.augment.ipynb 146
class DeterministicDraw():
    "Cycle deterministically through `vals`, broadcasting one value per call over the batch"
    def __init__(self, vals):
        self.vals = vals
        self.count = -1  # incremented before use, so the first call yields vals[0]
    def __call__(self, x):
        self.count += 1
        val = self.vals[self.count % len(self.vals)]
        return x.new_zeros(x.size(0)) + val
# %% ../../nbs/09_vision.augment.ipynb 148
class DeterministicFlip(Flip):
    "Flip the batch every other call"
    def __init__(self,
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        align_corners=True, # PyTorch `F.grid_sample` align_corners
        **kwargs
    ):
        # Bug fix: `size` was accepted but never forwarded to `Flip.__init__`,
        # so any requested output size was silently ignored
        super().__init__(p=1., draw=DeterministicDraw([0,1]), size=size, mode=mode, pad_mode=pad_mode,
                         align_corners=align_corners, **kwargs)
# %% ../../nbs/09_vision.augment.ipynb 153
def dihedral_mat(
    x:Tensor, # Input `Tensor`
    p:float=0.5, # Probability of staying unchanged
    draw:int|MutableSequence|callable=None, # Custom dihedrals instead of random
    batch:bool=False # Apply identical dihedral to entire batch
):
    "Return a random dihedral matrix"
    def _def_draw(x):   return torch.randint(0,8, (x.size(0),), device=x.device)
    def _def_draw_b(x): return random.randint(0,7) + x.new_zeros((x.size(0),)).long()
    idx = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch).long()
    # Lookup tables indexed by k in 0..7: sign of x/y and whether axes are swapped
    xs = tensor([1,-1,1,-1,-1,1,1,-1], device=x.device).gather(0, idx)
    ys = tensor([1,1,-1,1,-1,-1,1,-1], device=x.device).gather(0, idx)
    m0 = tensor([1,1,1,0,1,0,0,0], device=x.device).gather(0, idx)
    m1 = tensor([0,0,0,1,0,1,1,1], device=x.device).gather(0, idx)
    return affine_mat(xs*m0,  xs*m1,  t0(xs),
                      ys*m1,  ys*m0,  t0(xs)).float()
# %% ../../nbs/09_vision.augment.ipynb 154
@patch
def dihedral_batch(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
    p=0.5, # Probability of applying dihedral
    draw:int|MutableSequence|callable=None, # Custom dihedrals instead of random
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=None, # Padding applied to `x`
    batch=False, # Apply identical dihedral to entire batch
    align_corners=True # PyTorch `F.grid_sample` align_corners
):
    # Batch-level dihedral transform implemented as an affine transform
    x0,mode,pad_mode = _get_default(x, mode, pad_mode)
    mat = _prepare_mat(x, dihedral_mat(x0, p=p, draw=draw, batch=batch))
    return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 155
class Dihedral(AffineCoordTfm):
    "Apply a random dihedral transformation to a batch of images with a probability `p`"
    def __init__(self,
        p=0.5, # Probability of applying dihedral
        draw:int|MutableSequence|callable=None, # Custom dihedrals instead of random
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        batch=False, # Apply identical dihedral to entire batch
        align_corners=True # PyTorch `F.grid_sample` align_corners
    ):
        # `dihedral_mat` supplies per-batch affine matrices; interpolation happens in the parent
        f = partial(dihedral_mat, p=p, draw=draw, batch=batch)
        super().__init__(aff_fs=f, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 160
class DeterministicDihedral(Dihedral):
    "Cycle through the 8 dihedral transformations, one per batch"
    def __init__(self,
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        align_corners=None # PyTorch `F.grid_sample` align_corners
    ):
        # Bug fix: `size` and `mode` were accepted but never forwarded to `Dihedral.__init__`,
        # so both were silently ignored; also the old docstring ("Flip the batch every other
        # call") was copied from DeterministicFlip and misplaced inside __init__
        super().__init__(p=1., draw=DeterministicDraw(list(range(8))), size=size, mode=mode,
                         pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 164
def rotate_mat(
    x:Tensor, # Input `Tensor`
    max_deg:int=10, # Maximum degree of rotation
    p:float=0.5, # Probability of applying rotate
    draw:int|MutableSequence|callable=None, # Custom rotates instead of random
    batch:bool=False # Apply identical rotate to entire batch
):
    "Return a random rotation matrix with `max_deg` and `p`"
    def _def_draw(x):   return x.new_empty(x.size(0)).uniform_(-max_deg, max_deg)
    def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(-max_deg, max_deg)
    # Angles drawn in degrees, converted to radians for the rotation matrix
    thetas = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch) * math.pi/180
    return affine_mat(thetas.cos(),  thetas.sin(), t0(thetas),
                      -thetas.sin(), thetas.cos(), t0(thetas))
# %% ../../nbs/09_vision.augment.ipynb 165
@patch
@delegates(rotate_mat)
def rotate(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str=None, # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=None, # Padding applied to `x`
    align_corners:bool=True, # PyTorch `F.grid_sample` align_corners
    **kwargs
):
    "Apply a random rotation to `x`; extra kwargs are forwarded to `rotate_mat`."
    # Fill in interpolation/padding defaults appropriate for the tensor type of `x`
    x0,mode,pad_mode = _get_default(x, mode, pad_mode)
    mat = _prepare_mat(x, rotate_mat(x0, **kwargs))
    return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 166
class Rotate(AffineCoordTfm):
    "Apply a random rotation of at most `max_deg` with probability `p` to a batch of images"
    def __init__(self,
        max_deg:int=10, # Maximum degree of rotation
        p:float=0.5, # Probability of applying rotate
        draw:int|MutableSequence|callable=None, # Custom rotates instead of random
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        align_corners:bool=True, # PyTorch `F.grid_sample` align_corners
        batch:bool=False # Apply identical rotate to entire batch
    ):
        # The matrix-generating function is re-invoked on every batch so angles are re-drawn
        aff_fs = partial(rotate_mat, max_deg=max_deg, p=p, draw=draw, batch=batch)
        super().__init__(aff_fs=aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 171
def zoom_mat(
    x:Tensor, # Input `Tensor`
    min_zoom:float=1., # Minimum zoom
    max_zoom:float=1.1, # Maximum zoom
    p:float=0.5, # Probability of applying zoom
    draw:float|MutableSequence|callable=None, # User defined scale of the zoom
    draw_x:float|MutableSequence|callable=None, # User defined center of the zoom in x
    draw_y:float|MutableSequence|callable=None, # User defined center of the zoom in y
    batch:bool=False # Apply identical zoom to entire batch
):
    "Return a random zoom matrix with `max_zoom` and `p`"
    # Per-item vs whole-batch draws, for the zoom scale and for the zoom center (in [0,1])
    def _def_draw(x): return x.new_empty(x.size(0)).uniform_(min_zoom, max_zoom)
    def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(min_zoom, max_zoom)
    def _def_draw_ctr(x): return x.new_empty(x.size(0)).uniform_(0,1)
    def _def_draw_ctr_b(x): return x.new_zeros(x.size(0)) + random.uniform(0,1)
    assert(min_zoom<=max_zoom)
    # Affine scale is the reciprocal of the zoom: zooming in shrinks the sampling grid
    s = 1/_draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, neutral=1., batch=batch)
    def_draw_c = _def_draw_ctr_b if batch else _def_draw_ctr
    # Centers are always drawn (p=1.); only the scale respects `p`
    col_pct = _draw_mask(x, def_draw_c, draw=draw_x, p=1., batch=batch)
    row_pct = _draw_mask(x, def_draw_c, draw=draw_y, p=1., batch=batch)
    # Translation that keeps the chosen center fixed, in [-1,1] grid coordinates
    col_c = (1-s) * (2*col_pct - 1)
    row_c = (1-s) * (2*row_pct - 1)
    return affine_mat(s, t0(s), col_c,
                      t0(s), s, row_c)
# %% ../../nbs/09_vision.augment.ipynb 172
@patch
@delegates(zoom_mat)
def zoom(x: TensorImage|TensorMask|TensorPoint|TensorBBox,
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=PadMode.Reflection, # Padding applied to `x`
    align_corners:bool=True, # PyTorch `F.grid_sample` align_corners
    **kwargs
):
    "Apply a random zoom to `x`; extra kwargs are forwarded to `zoom_mat`."
    x0,mode,pad_mode = _get_default(x, mode, pad_mode)
    # Only the first two rows of the 3x3 matrix are needed for an affine grid
    return x.affine_coord(mat=zoom_mat(x0, **kwargs)[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 173
class Zoom(AffineCoordTfm):
    "Apply a random zoom of at most `max_zoom` with probability `p` to a batch of images"
    def __init__(self,
        min_zoom:float=1., # Minimum zoom
        max_zoom:float=1.1, # Maximum zoom
        p:float=0.5, # Probability of applying zoom
        draw:float|MutableSequence|callable=None, # User defined scale of the zoom
        draw_x:float|MutableSequence|callable=None, # User defined center of the zoom in x
        draw_y:float|MutableSequence|callable=None, # User defined center of the zoom in y
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        batch=False, # Apply identical zoom to entire batch
        align_corners=True # PyTorch `F.grid_sample` align_corners
    ):
        # Re-invoked on every batch so a fresh scale/center is drawn each time
        aff_fs = partial(zoom_mat, min_zoom=min_zoom, max_zoom=max_zoom, p=p, draw=draw, draw_x=draw_x, draw_y=draw_y, batch=batch)
        super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 178
def _linalg_solve(A,B):
    "Solve `AX = B` with the modern `torch.linalg` API (PyTorch >= 1.9)."
    return torch.linalg.solve(A,B)
def _solve(A,B):
    "Solve `AX = B` with the legacy `torch.solve` API (note the swapped argument order)."
    return torch.solve(B,A)[0]
# Pick the solver implementation matching the installed PyTorch version
if ismin_torch('1.9'): solve = _linalg_solve
else: solve = _solve
# %% ../../nbs/09_vision.augment.ipynb 179
def find_coeffs(
    p1:Tensor, # Original points
    p2:Tensor, # Target points
    ):
    "Find coefficients for warp tfm from `p1` to `p2`"
    # Builds and solves the standard 8-unknown homography system: one pair of
    # equations per point correspondence.
    m = []
    p = p1[:,0,0]
    #The equations we'll need to solve.
    for i in range(p1.shape[1]):
        # Rows for the x- and y-coordinate of correspondence `i` (t0/t1 are 0/1 fillers)
        m.append(stack([p2[:,i,0], p2[:,i,1], t1(p), t0(p), t0(p), t0(p), -p1[:,i,0]*p2[:,i,0], -p1[:,i,0]*p2[:,i,1]]))
        m.append(stack([t0(p), t0(p), t0(p), p2[:,i,0], p2[:,i,1], t1(p), -p1[:,i,1]*p2[:,i,0], -p1[:,i,1]*p2[:,i,1]]))
    #The 8 scalars we seek are solution of AX = B
    A = stack(m).permute(2, 0, 1)
    B = p1.view(p1.shape[0], 8, 1)
    return solve(A,B)
# %% ../../nbs/09_vision.augment.ipynb 180
def apply_perspective(
    coords:Tensor, # Original coordinates
    coeffs:Tensor # Warping transformation matrice
):
    "Apply perspective tranform on `coords` with `coeffs`"
    sz = coords.shape
    coords = coords.view(sz[0], -1, 2)
    # Append the implicit 9th coefficient (1) and reshape into full 3x3 homographies
    coeffs = torch.cat([coeffs, t1(coeffs[:,:1])], dim=1).view(coeffs.shape[0], 3,3)
    coords1 = coords @ coeffs[...,:2].transpose(1,2) + coeffs[...,2].unsqueeze(1)
    # Guard against division by zero in the homogeneous coordinate: fall back to identity
    if (coords1[...,2]==0.).any(): return coords[...,:2].view(*sz)
    # Perspective divide to get back to Cartesian coordinates
    coords = coords1/coords1[...,2].unsqueeze(-1)
    return coords[...,:2].view(*sz)
# %% ../../nbs/09_vision.augment.ipynb 181
class _WarpCoord():
    "Coordinate functor drawing random perspective-warp corner offsets and applying them."
    def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None, batch=False):
        store_attr()
        self.coeffs = None
    def _def_draw(self, x):
        # Per-item random offset in [-magnitude, magnitude], or one shared value for the batch
        if not self.batch: return x.new_empty(x.size(0)).uniform_(-self.magnitude, self.magnitude)
        return x.new_zeros(x.size(0)) + random.uniform(-self.magnitude, self.magnitude)
    def before_call(self, x):
        # Draw the x/y warp amounts and derive source/target corner quads in [-1,1] coords
        x_t = _draw_mask(x, self._def_draw, self.draw_x, p=self.p, batch=self.batch)
        y_t = _draw_mask(x, self._def_draw, self.draw_y, p=self.p, batch=self.batch)
        orig_pts = torch.tensor([[-1,-1], [-1,1], [1,-1], [1,1]], dtype=x.dtype, device=x.device)
        self.orig_pts = orig_pts.unsqueeze(0).expand(x.size(0),4,2)
        targ_pts = stack([stack([-1-y_t, -1-x_t]), stack([-1+y_t, 1+x_t]),
                          stack([ 1+y_t, -1+x_t]), stack([ 1-y_t, 1-x_t])])
        self.targ_pts = targ_pts.permute(2,0,1)
    def __call__(self, x, invert=False):
        # `invert=True` solves for the inverse warp (used to decode points/bboxes)
        coeffs = find_coeffs(self.targ_pts, self.orig_pts) if invert else find_coeffs(self.orig_pts, self.targ_pts)
        return apply_perspective(x, coeffs)
# %% ../../nbs/09_vision.augment.ipynb 182
@patch
@delegates(_WarpCoord.__init__)
def warp(x:TensorImage|TensorMask|TensorPoint|TensorBBox,
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str='bilinear', # PyTorch `F.grid_sample` interpolation applied to `x`
    pad_mode=PadMode.Reflection, # Padding applied to `x`
    align_corners:bool=True, # PyTorch `F.grid_sample` align_corners
    **kwargs
):
    "Apply a random perspective warp to `x`; extra kwargs are forwarded to `_WarpCoord`."
    x0,mode,pad_mode = _get_default(x, mode, pad_mode)
    # Draw the random corner offsets once, then warp via the coordinate transform
    coord_tfm = _WarpCoord(**kwargs)
    coord_tfm.before_call(x0)
    return x.affine_coord(coord_tfm=coord_tfm, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 185
class Warp(AffineCoordTfm):
    "Apply perspective warping with `magnitude` and `p` on a batch of matrices"
    def __init__(self,
        magnitude:float=0.2, # The default warping magnitude
        p:float=0.5, # Probability of applying warp
        draw_x:float|MutableSequence|callable=None, # User defined warping magnitude in x
        draw_y:float|MutableSequence|callable=None, # User defined warping magnitude in y
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        batch:bool=False, # Apply identical warp to entire batch
        align_corners:bool=True # PyTorch `F.grid_sample` align_corners
    ):
        store_attr()
        # The warp is expressed as a coordinate transform (not an affine matrix)
        coord_fs = _WarpCoord(magnitude=magnitude, p=p, draw_x=draw_x, draw_y=draw_y, batch=batch)
        super().__init__(coord_fs=coord_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners )
# %% ../../nbs/09_vision.augment.ipynb 193
@patch
# Run `func` in logit space: logit maps [0,1] pixel values onto the real line so
# additive/multiplicative adjustments stay well-behaved, then sigmoid maps back to [0,1].
def lighting(x: TensorImage, func): return torch.sigmoid(func(logit(x)))
# %% ../../nbs/09_vision.augment.ipynb 199
class SpaceTfm(RandTransform):
    "Apply `fs` to images after converting them into an alternate color space via `space_fn`."
    order = 40
    def __init__(self,
        fs:callable|MutableSequence, # Transformation functions applying in a space
        space_fn:callable, # Function converting rgb to a space and back to rgb after appying `fs`
        **kwargs
    ):
        super().__init__(**kwargs)
        self.space_fn=space_fn
        self.fs=L(fs)
    def before_call(self,
        b,
        split_idx:int, # Index of the train/valid dataset
    ):
        "Draw the random state of every sub-transform before the batch is processed."
        self.do = True
        # Unwrap (input, target, ...) tuples down to the first tensor
        while isinstance(b, tuple): b = b[0]
        for t in self.fs: t.before_call(b)
    def compose(self,
        tfm:callable # Transformation function to compose
    ):
        "Compose `self` with another `LightingTransform`"
        # Merging the function lists lets several lighting tfms share one space round-trip
        self.fs += tfm.fs
    def encodes(self,x:TensorImage): return self.space_fn(x,partial(compose_tfms, tfms=self.fs))
# %% ../../nbs/09_vision.augment.ipynb 201
class LightingTfm(SpaceTfm):
    "Apply `fs` to the logits"
    order = 40
    def __init__(self,
        fs:callable|MutableSequence, # Transformation functions applying in logit space,
        **kwargs
    ):
        # Specialization of `SpaceTfm` whose color space is the logit space
        super().__init__(fs, TensorImage.lighting, **kwargs)
# %% ../../nbs/09_vision.augment.ipynb 203
class _BrightnessLogit():
    "Draws a brightness change around 0.5 and applies it additively in logit space."
    def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
    def _def_draw(self, x):
        # Uniform around 0.5 (the neutral brightness), per item or shared across the batch
        if not self.batch: return x.new_empty(x.size(0)).uniform_(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
        return x.new_zeros(x.size(0)) + random.uniform(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
    def before_call(self, x):
        # neutral=0.5 means unselected items get a no-op brightness
        self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0.5, batch=self.batch)
    def __call__(self, x): return x.add_(logit(self.change[:,None,None,None]))
# %% ../../nbs/09_vision.augment.ipynb 204
@patch
@delegates(_BrightnessLogit.__init__)
def brightness(x: TensorImage, **kwargs):
    "Randomly adjust the brightness of `x` by working in logit space."
    tfm = _BrightnessLogit(**kwargs)
    tfm.before_call(x)
    return x.lighting(tfm)
# %% ../../nbs/09_vision.augment.ipynb 205
class Brightness(LightingTfm):
    def __init__(self,
        max_lighting:float=0.2, # Maximum scale of changing brightness
        p:float=0.75, # Probability of appying transformation
        draw:float|MutableSequence|callable=None, # User defined behavior of batch transformation
        batch=False # Apply identical brightness to entire batch
    ):
        "Apply change in brightness of `max_lighting` to batch of images with probability `p`."
        store_attr()
        # The actual work happens in `_BrightnessLogit`, run in logit space by the parent
        super().__init__(_BrightnessLogit(max_lighting, p, draw, batch))
# %% ../../nbs/09_vision.augment.ipynb 211
class _ContrastLogit():
    "Draws a multiplicative contrast factor around 1 and applies it in logit space."
    def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
    def _def_draw(self, x):
        # Sample in log space so scaling up and down are symmetric, then exponentiate
        if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
        else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
        return torch.exp(res)
    def before_call(self, x):
        # neutral=1. means unselected items keep their contrast unchanged
        self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
    def __call__(self, x): return x.mul_(self.change[:,None,None,None])
# %% ../../nbs/09_vision.augment.ipynb 212
@patch
@delegates(_ContrastLogit.__init__)
def contrast(x: TensorImage, **kwargs):
    "Randomly adjust the contrast of `x` by working in logit space."
    tfm = _ContrastLogit(**kwargs)
    tfm.before_call(x)
    return x.lighting(tfm)
# %% ../../nbs/09_vision.augment.ipynb 213
class Contrast(LightingTfm):
    "Apply change in contrast of `max_lighting` to batch of images with probability `p`."
    def __init__(self,
        max_lighting=0.2, # Maximum scale of changing contrast
        p=0.75, # Probability of appying transformation
        draw:float|MutableSequence|callable=None, # User defined behavior of batch transformation
        batch=False # Apply identical contrast to entire batch
    ):
        store_attr()
        # The actual work happens in `_ContrastLogit`, run in logit space by the parent
        super().__init__(_ContrastLogit(max_lighting, p, draw, batch))
# %% ../../nbs/09_vision.augment.ipynb 218
def grayscale(x):
    "Tensor to grayscale tensor. Uses the ITU-R 601-2 luma transform. "
    # Weighted sum over the channel axis, keeping a singleton channel dimension
    luma_weights = torch.tensor([0.2989, 0.5870, 0.1140], device=x.device)
    return (x * luma_weights[..., None, None]).sum(1)[:, None]
# %% ../../nbs/09_vision.augment.ipynb 221
class _SaturationLogit():
    "Draws a saturation factor and blends each image with its grayscale version in-place."
    def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
    def _def_draw(self, x):
        # Sample in log space so increasing/decreasing saturation are symmetric
        if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
        else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
        return torch.exp(res)
    def before_call(self, x):
        # neutral=1. means unselected items keep their saturation unchanged
        self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
    def __call__(self, x):
        #interpolate between grayscale and original in-place
        gs = grayscale(x)
        gs.mul_(1-self.change[:,None,None,None])
        x.mul_(self.change[:,None,None,None])
        return x.add_(gs)
# %% ../../nbs/09_vision.augment.ipynb 222
@patch
@delegates(_SaturationLogit.__init__)
def saturation(x: TensorImage, **kwargs):
    "Randomly adjust the saturation of `x` by working in logit space."
    tfm = _SaturationLogit(**kwargs)
    tfm.before_call(x)
    return x.lighting(tfm)
# %% ../../nbs/09_vision.augment.ipynb 223
class Saturation(LightingTfm):
    "Apply change in saturation of `max_lighting` to batch of images with probability `p`."
    # Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_saturation
    def __init__(self,
        max_lighting:float=0.2, # Maximum scale of changing saturation
        p:float=0.75, # Probability of appying transformation
        draw:float|MutableSequence|callable=None, # User defined behavior of batch transformation
        batch:bool=False # Apply identical saturation to entire batch
    ):
        store_attr()
        # The actual work happens in `_SaturationLogit`, run in logit space by the parent
        super().__init__(_SaturationLogit(max_lighting, p, draw, batch))
# %% ../../nbs/09_vision.augment.ipynb 230
def rgb2hsv(
    img:Tensor # Batch of images `Tensor`in RGB
):
    "Converts a RGB image to an HSV image. Note: Will not work on logit space images."
    r, g, b = img.unbind(1)
    # temp commented out due to https://github.com/pytorch/pytorch/issues/47069
    # maxc = torch.max(img, dim=1).values
    # minc = torch.min(img, dim=1).values
    maxc = torch.max(img, dim=1)[0]
    minc = torch.min(img, dim=1)[0]
    # `eqc` marks grayscale pixels (max == min): hue/saturation are undefined there,
    # so the `torch.where` guards below substitute safe divisors
    eqc = maxc == minc
    cr = maxc - minc
    s = cr / torch.where(eqc, maxc.new_ones(()), maxc)
    cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)
    rc = (maxc - r) / cr_divisor
    gc = (maxc - g) / cr_divisor
    bc = (maxc - b) / cr_divisor
    # Hue sector depends on which channel is the maximum; exactly one term is active
    hr = (maxc == r) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
    hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
    h = (hr + hg + hb)
    # Normalize hue to [0, 1)
    h = torch.fmod((h / 6.0 + 1.0), 1.0)
    return torch.stack((h, s, maxc),dim=1)
# %% ../../nbs/09_vision.augment.ipynb 231
def hsv2rgb(
    img:Tensor, # Batch of images `Tensor in HSV`
):
    "Converts a HSV image to an RGB image."
    h, s, v = img.unbind(1)
    # Split hue into one of 6 sectors (`i`) and the fractional position inside it (`f`)
    i = torch.floor(h * 6.0)
    f = (h * 6.0) - i
    i = i.to(dtype=torch.int32)
    p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
    q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
    t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
    i = i % 6
    # One-hot mask over the 6 hue sectors, per pixel
    mask = i[:,None] == torch.arange(6,device=i.device)[:, None, None][None]
    # Per-sector choice of (r, g, b) among v/p/q/t, selected by the mask via einsum
    a1 = torch.stack((v, q, p, p, t, v),dim=1)
    a2 = torch.stack((t, v, v, q, p, p),dim=1)
    a3 = torch.stack((p, p, t, v, v, q),dim=1)
    a4 = torch.stack((a1, a2, a3),dim=1)
    return torch.einsum("nijk, nxijk -> nxjk", mask.to(dtype=img.dtype), a4)
# %% ../../nbs/09_vision.augment.ipynb 233
@patch
# Round-trip through HSV space: convert, apply `func`, convert back to RGB
def hsv(x: TensorImage, func): return TensorImage(hsv2rgb(func(rgb2hsv(x))))
# %% ../../nbs/09_vision.augment.ipynb 234
class HSVTfm(SpaceTfm):
    "Apply `fs` to the images in HSV space"
    def __init__(self, fs, **kwargs):
        # Specialization of `SpaceTfm` whose color space is HSV
        super().__init__(fs, TensorImage.hsv, **kwargs)
# %% ../../nbs/09_vision.augment.ipynb 238
class _Hue():
    "Draws a hue shift and applies it to the H channel of an HSV image (wrapping mod 1)."
    def __init__(self, max_hue=0.1, p=0.75, draw=None, batch=False): store_attr()
    def _def_draw(self, x):
        # NOTE(review): this samples exp(uniform(log(1-max_hue), -log(1-max_hue))), i.e. a
        # multiplicative-style factor near 1, even though the shift is applied additively
        # (mod 1) below — looks copied from the contrast draw; confirm intent before changing.
        if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_hue), -math.log(1-self.max_hue))
        else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_hue), -math.log(1-self.max_hue))
        return torch.exp(res)
    def before_call(self, x):
        # neutral=0. means unselected items get no hue shift
        self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0., batch=self.batch)
    def __call__(self, x):
        h,s,v = x.unbind(1)
        h += self.change[:,None,None]
        h = h % 1.0  # hue is circular in [0, 1)
        return x.set_(torch.stack((h, s, v),dim=1))
# %% ../../nbs/09_vision.augment.ipynb 239
@patch
@delegates(_Hue.__init__)
def hue(x: TensorImage, **kwargs):
    "Randomly shift the hue of `x` by round-tripping through HSV space."
    shift = _Hue(**kwargs)
    shift.before_call(x)
    return TensorImage(x.hsv(shift))
# %% ../../nbs/09_vision.augment.ipynb 240
class Hue(HSVTfm):
    "Apply change in hue of `max_hue` to batch of images with probability `p`."
    # Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_hue
    def __init__(self,
        max_hue:float=0.1, # Maximum scale of changing Hue
        p:float=0.75, # Probability of appying transformation
        draw:float|MutableSequence|callable=None, # User defined behavior of batch transformation
        batch=False # Apply identical Hue to entire batch
    ):
        # The actual work happens in `_Hue`, run in HSV space by the parent
        super().__init__(_Hue(max_hue, p, draw, batch))
# %% ../../nbs/09_vision.augment.ipynb 246
def cutout_gaussian(
    x:Tensor, # Input image
    areas:list # List of areas to cutout. Order rl,rh,cl,ch
):
    "Replace all `areas` in `x` with N(0,1) noise"
    # Fill each rectangle (row-low, row-high, col-low, col-high) with standard normal
    # noise in place; `x` itself is returned.
    for row_lo, row_hi, col_lo, col_hi in areas:
        x[..., row_lo:row_hi, col_lo:col_hi].normal_()
    return x
# %% ../../nbs/09_vision.augment.ipynb 248
def norm_apply_denorm(
x:Tensor, # Input Image
f:callable, # Function to apply
nrm:callable # Normalization transformation
):
"Normalize `x` with `nrm`, then apply `f`, then denormalize"
y = f(nrm(x.clone()))
return nrm.decode(y).clamp(0,1)
# %% ../../nbs/09_vision.augment.ipynb 251
def _slice(area, sz):
bound = int(round(math.sqrt(area)))
loc = random.randint(0, max(sz-bound, 0))
return loc,loc+bound
# %% ../../nbs/09_vision.augment.ipynb 252
class RandomErasing(RandTransform):
    "Randomly selects a rectangle region in an image and randomizes its pixels."
    order = 100 # After Normalize
    def __init__(self,
        p:float=0.5, # Probability of appying Random Erasing
        sl:float=0., # Minimum proportion of erased area
        sh:float=0.3, # Maximum proportion of erased area
        min_aspect:float=0.3, # Minimum aspect ratio of erased area
        max_count:int=1 # Maximum number of erasing blocks per image, area per box is scaled by count
    ):
        store_attr()
        super().__init__(p=p)
        # Aspect ratios are drawn log-uniformly in [min_aspect, 1/min_aspect]
        self.log_ratio = (math.log(min_aspect), math.log(1/min_aspect))
    def _bounds(self, area, img_h, img_w):
        "Draw one random erasing rectangle of proportion [sl, sh] of `area` as (rl, rh, cl, ch)."
        r_area = random.uniform(self.sl,self.sh) * area
        aspect = math.exp(random.uniform(*self.log_ratio))
        return _slice(r_area*aspect, img_h) + _slice(r_area/aspect, img_w)
    def encodes(self,x:TensorImage):
        # Split the total erasable area evenly across `count` boxes
        count = random.randint(1, self.max_count)
        _,img_h,img_w = x.shape[-3:]
        area = img_h*img_w/count
        areas = [self._bounds(area, img_h, img_w) for _ in range(count)]
        return cutout_gaussian(x, areas)
# %% ../../nbs/09_vision.augment.ipynb 257
def _compose_same_tfms(tfms):
    "Fold a list of same-kind transforms into one composed transform, or `None` if empty."
    tfms = L(tfms)
    if not tfms: return None
    first, *rest = tfms
    for other in rest: first.compose(other)
    return first
# %% ../../nbs/09_vision.augment.ipynb 258
def setup_aug_tfms(tfms):
    "Go through `tfms` and combines together affine/coord or lighting transforms"
    # Partition into affine, lighting and everything else, preserving order
    affines = [t for t in tfms if isinstance(t, AffineCoordTfm)]
    lightings = [t for t in tfms if isinstance(t, LightingTfm)]
    others = [t for t in tfms if t not in affines + lightings]
    # Each family collapses into at most one composed transform
    lig = _compose_same_tfms(lightings)
    aff = _compose_same_tfms(affines)
    res = []
    if aff is not None: res.append(aff)
    if lig is not None: res.append(lig)
    return res + others
# %% ../../nbs/09_vision.augment.ipynb 262
def aug_transforms(
    mult:float=1.0, # Multiplication applying to `max_rotate`,`max_lighting`,`max_warp`
    do_flip:bool=True, # Random flipping
    flip_vert:bool=False, # Flip vertically
    max_rotate:float=10., # Maximum degree of rotation
    min_zoom:float=1., # Minimum zoom
    max_zoom:float=1.1, # Maximum zoom
    max_lighting:float=0.2, # Maximum scale of changing brightness
    max_warp:float=0.2, # Maximum value of changing warp per
    p_affine:float=0.75, # Probability of applying affine transformation
    p_lighting:float=0.75, # Probability of changing brightnest and contrast
    xtra_tfms:list=None, # Custom Transformations
    size:int|tuple=None, # Output size, duplicated if one value is specified
    mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
    pad_mode=PadMode.Reflection, # A `PadMode`
    align_corners=True, # PyTorch `F.grid_sample` align_corners
    batch=False, # Apply identical transformation to entire batch
    min_scale=1. # Minimum scale of the crop, in relation to image area
):
    "Utility func to easily create a list of flip, rotate, zoom, warp, lighting transforms."
    # When random-resized cropping is requested (min_scale<1), sizing is delegated to it
    res,tkw = [],dict(size=size if min_scale==1. else None, mode=mode, pad_mode=pad_mode, batch=batch, align_corners=align_corners)
    max_rotate,max_lighting,max_warp = array([max_rotate,max_lighting,max_warp])*mult
    # Dihedral covers vertical+horizontal flips; Flip is horizontal-only
    if do_flip: res.append(Dihedral(p=0.5, **tkw) if flip_vert else Flip(p=0.5, **tkw))
    # Each affine transform is included only when its magnitude is non-trivial
    if max_warp:   res.append(Warp(magnitude=max_warp, p=p_affine, **tkw))
    if max_rotate: res.append(Rotate(max_deg=max_rotate, p=p_affine, **tkw))
    if min_zoom<1 or max_zoom>1: res.append(Zoom(min_zoom=min_zoom, max_zoom=max_zoom, p=p_affine, **tkw))
    if max_lighting:
        res.append(Brightness(max_lighting=max_lighting, p=p_lighting, batch=batch))
        res.append(Contrast(max_lighting=max_lighting, p=p_lighting, batch=batch))
    if min_scale!=1.: xtra_tfms = RandomResizedCropGPU(size, min_scale=min_scale, ratio=(1,1)) + L(xtra_tfms)
    # `setup_aug_tfms` merges same-family transforms so each batch pays one grid_sample
    return setup_aug_tfms(res + L(xtra_tfms))
| 57,110 | 43.898585 | 134 | py |
fastai | fastai-master/fastai/vision/utils.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/09b_vision.utils.ipynb.
# %% ../../nbs/09b_vision.utils.ipynb 3
from __future__ import annotations
import uuid
from ..torch_basics import *
from ..data.all import *
from .core import *
from fastdownload import download_url
from pathlib import Path
# %% auto 0
__all__ = ['download_images', 'resize_to', 'verify_image', 'verify_images', 'resize_image', 'resize_images']
# %% ../../nbs/09b_vision.utils.ipynb 6
def _get_downloaded_image_filename(dest, name, suffix):
start_index = 1
candidate_name = name
while (dest/f"{candidate_name}{suffix}").is_file():
candidate_name = f"{candidate_name}{start_index}"
start_index += 1
return candidate_name
# %% ../../nbs/09b_vision.utils.ipynb 7
def _download_image_inner(dest, inp, timeout=4, preserve_filename=False):
    "Download one image from an `(index, url)` pair into `dest`; failures are reported, not raised."
    i,url = inp
    url = url.split("?")[0]  # drop any query string before inferring the extension
    url_path = Path(url)
    suffix = url_path.suffix if url_path.suffix else '.jpg'
    name = _get_downloaded_image_filename(dest, url_path.stem, suffix) if preserve_filename else str(uuid.uuid4())
    try: download_url(url, dest/f"{name}{suffix}", show_progress=False, timeout=timeout)
    # Bug fix: the failure message used to be a bare f-string expression (a no-op), so
    # download errors were silently discarded; actually print which URL failed while
    # keeping the best-effort, never-raise contract.
    except Exception: print(f"Couldn't download {url}.")
# %% ../../nbs/09b_vision.utils.ipynb 9
def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4, preserve_filename=False):
    "Download images listed in text file `url_file` to path `dest`, at most `max_pics`"
    # `urls` takes precedence; otherwise read one URL per line from `url_file`
    if urls is None: urls = url_file.read_text().strip().split("\n")[:max_pics]
    dest = Path(dest)
    dest.mkdir(exist_ok=True)
    # Thread pool (not processes): the work is network-bound
    parallel(partial(_download_image_inner, dest, timeout=timeout, preserve_filename=preserve_filename),
             list(enumerate(urls)), n_workers=n_workers, threadpool=True)
# %% ../../nbs/09b_vision.utils.ipynb 11
def resize_to(img, targ_sz, use_min=False):
    "Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
    w, h = img.size
    # Scale so the chosen reference side (shorter or longer) becomes `targ_sz`
    ref_side = min(w, h) if use_min else max(w, h)
    scale = targ_sz / ref_side
    return int(w * scale), int(h * scale)
# %% ../../nbs/09b_vision.utils.ipynb 13
def verify_image(fn):
    "Confirm that `fn` can be opened"
    try:
        im = Image.open(fn)
        im.draft(im.mode, (32,32))  # cheap low-res decode is enough to validate the data
        im.load()
        return True
    # Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # catch only real errors while keeping the best-effort True/False contract.
    except Exception: return False
# %% ../../nbs/09b_vision.utils.ipynb 14
def verify_images(fns):
    "Find images in `fns` that can't be opened"
    # Verify in parallel; keep the paths whose check came back False
    return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o)
# %% ../../nbs/09b_vision.utils.ipynb 15
def resize_image(file, dest, src='.', max_size=None, n_channels=3, ext=None,
                 img_format=None, resample=BILINEAR, resume=False, **kwargs ):
    "Resize file to dest to max_size"
    dest = Path(dest)

    dest_fname = dest/file
    dest_fname.parent.mkdir(exist_ok=True, parents=True)
    file = Path(src)/file
    # `resume=True` skips files that were already produced by a previous run
    if resume and dest_fname.exists(): return
    # Unreadable/corrupt images are silently skipped
    if not verify_image(file): return

    img = Image.open(file)
    imgarr = np.array(img)
    img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
    if ext is not None: dest_fname=dest_fname.with_suffix(ext)
    # Re-encode only when the image is too big or has the wrong channel count...
    if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
        if max_size is not None:
            new_sz = resize_to(img, max_size)
            img = img.resize(new_sz, resample=resample)
        if n_channels == 3: img = img.convert("RGB")
        img.save(dest_fname, img_format, **kwargs)
    # ...otherwise just copy the original (preserving metadata via copy2)
    elif file != dest_fname : shutil.copy2(file, dest_fname)
# %% ../../nbs/09b_vision.utils.ipynb 18
def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False,
                  dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=BILINEAR,
                  resume=None, **kwargs):
    "Resize files on path recursively to dest to max_size"
    path = Path(path)
    # Default `resume` to False only when writing to a distinct destination directory
    if resume is None and dest != Path('.'): resume=False
    os.makedirs(dest, exist_ok=True)
    files = get_image_files(path, recurse=recurse)
    # Work with paths relative to `path` so the directory structure is mirrored in `dest`
    files = [o.relative_to(path) for o in files]
    parallel(resize_image, files, src=path, n_workers=max_workers, max_size=max_size, dest=dest, n_channels=n_channels, ext=ext,
             img_format=img_format, resample=resample, resume=resume, **kwargs)
| 4,333 | 40.673077 | 128 | py |
fastai | fastai-master/fastai/vision/gan.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/24_vision.gan.ipynb.
# %% ../../nbs/24_vision.gan.ipynb 2
from __future__ import annotations
from ..basics import *
from .all import *
# %% auto 0
__all__ = ['GANModule', 'basic_critic', 'AddChannels', 'basic_generator', 'DenseResBlock', 'gan_critic', 'GANLoss',
'AdaptiveLoss', 'accuracy_thresh_expand', 'set_freeze_model', 'GANTrainer', 'FixedGANSwitcher',
'AdaptiveGANSwitcher', 'GANDiscriminativeLR', 'InvisibleTensor', 'generate_noise', 'show_batch',
'show_results', 'gan_loss_from_func', 'GANLearner']
# %% ../../nbs/24_vision.gan.ipynb 9
class GANModule(Module):
"Wrapper around a `generator` and a `critic` to create a GAN."
def __init__(self,
generator:nn.Module=None, # The generator PyTorch module
critic:nn.Module=None, # The discriminator PyTorch module
gen_mode:None|bool=False # Whether the GAN should be set to generator mode
):
if generator is not None: self.generator=generator
if critic is not None: self.critic =critic
store_attr('gen_mode')
def forward(self, *args):
return self.generator(*args) if self.gen_mode else self.critic(*args)
def switch(self,
gen_mode:None|bool=None # Whether the GAN should be set to generator mode
):
"Put the module in generator mode if `gen_mode` is `True`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
# %% ../../nbs/24_vision.gan.ipynb 13
@delegates(ConvLayer.__init__)
def basic_critic(
    in_size:int, # Input size for the critic (same as the output size of the generator)
    n_channels:int, # Number of channels of the input for the critic
    n_features:int=64, # Number of features used in the critic
    n_extra_layers:int=0, # Number of extra hidden layers in the critic
    norm_type:NormType=NormType.Batch, # Type of normalization to use in the critic
    **kwargs
) -> nn.Sequential:
    "A basic critic for images `n_channels` x `in_size` x `in_size`."
    # First stride-2 conv has no normalization (DCGAN-style input layer)
    layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)]
    cur_size, cur_ftrs = in_size//2, n_features
    layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)]
    # Halve the spatial size and double the features until reaching 4x4
    while cur_size > 4:
        layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs))
        cur_ftrs *= 2 ; cur_size //= 2
    init = kwargs.get('init', nn.init.kaiming_normal_)
    # Final 4x4 conv collapses to a single scalar score per image
    layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()]
    return nn.Sequential(*layers)
# %% ../../nbs/24_vision.gan.ipynb 14
class AddChannels(Module):
    "Add `n_dim` channels at the end of the input."
    def __init__(self, n_dim): self.n_dim=n_dim
    # Appends `n_dim` trailing singleton dimensions, e.g. (bs, nz) -> (bs, nz, 1, 1)
    def forward(self, x): return x.view(*(list(x.shape)+[1]*self.n_dim))
# %% ../../nbs/24_vision.gan.ipynb 15
@delegates(ConvLayer.__init__)
def basic_generator(
    out_size:int, # Output size for the generator (same as the input size for the critic)
    n_channels:int, # Number of channels of the output of the generator
    in_sz:int=100, # Size of the input noise vector for the generator
    n_features:int=64, # Number of features used in the generator
    n_extra_layers:int=0, # Number of extra hidden layers in the generator
    **kwargs
) -> nn.Sequential:
    "A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`."
    # Work out how many features the first 4x4 feature map needs so that doubling
    # the size (and halving the features) at each step lands on `out_size`
    cur_size, cur_ftrs = 4, n_features//2
    while cur_size < out_size: cur_size *= 2; cur_ftrs *= 2
    # Reshape the noise vector to (bs, in_sz, 1, 1) then project to a 4x4 map
    layers = [AddChannels(2), ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, **kwargs)]
    cur_size = 4
    while cur_size < out_size // 2:
        layers.append(ConvLayer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **kwargs))
        cur_ftrs //= 2; cur_size *= 2
    layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **kwargs) for _ in range(n_extra_layers)]
    # Final layer maps to image channels; tanh keeps outputs in [-1, 1]
    layers += [nn.ConvTranspose2d(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
    return nn.Sequential(*layers)
# %% ../../nbs/24_vision.gan.ipynb 17
# Shared conv settings for the GAN critic: LeakyReLU activation + spectral normalization
_conv_args = dict(act_cls = partial(nn.LeakyReLU, negative_slope=0.2), norm_type=NormType.Spectral)

def _conv(ni, nf, ks=3, stride=1, self_attention=False, **kwargs):
    "Spectral-norm conv layer for the critic, optionally followed by self-attention."
    if self_attention: kwargs['xtra'] = SelfAttention(nf)
    return ConvLayer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)
# %% ../../nbs/24_vision.gan.ipynb 18
@delegates(ConvLayer)
def DenseResBlock(
    nf:int, # Number of features
    norm_type:NormType=NormType.Batch, # Normalization type
    **kwargs
) -> SequentialEx:
    "Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
    # `dense=True` concatenates the input to the output (channels double) instead of adding
    return SequentialEx(ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
                        ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
                        MergeLayer(dense=True))
# %% ../../nbs/24_vision.gan.ipynb 19
def gan_critic(
    n_channels:int=3, # Number of channels of the input for the critic
    nf:int=128, # Number of features for the critic
    n_blocks:int=3, # Number of ResNet blocks within the critic
    p:float=0.15 # Amount of dropout in the critic
) -> nn.Sequential:
    "Critic to train a `GAN`."
    layers = [
        _conv(n_channels, nf, ks=4, stride=2),
        nn.Dropout2d(p/2),
        DenseResBlock(nf, **_conv_args)]
    nf *= 2 # after dense block
    # Downsampling stages: halve spatial size, double features; self-attention on the first
    for i in range(n_blocks):
        layers += [
            nn.Dropout2d(p),
            _conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
        nf *= 2
    # Final conv collapses to one score per image; no activation (raw logits)
    layers += [
        ConvLayer(nf, 1, ks=4, bias=False, padding=0, norm_type=NormType.Spectral, act_cls=None),
        Flatten()]
    return nn.Sequential(*layers)
# %% ../../nbs/24_vision.gan.ipynb 20
class GANLoss(GANModule):
    "Wrapper around `crit_loss_func` and `gen_loss_func`"
    def __init__(self,
        gen_loss_func:callable, # Generator loss function
        crit_loss_func:callable, # Critic loss function
        gan_model:GANModule # The GAN model
    ):
        super().__init__()
        store_attr('gen_loss_func,crit_loss_func,gan_model')

    def generator(self,
        output, # Generator outputs
        target # Real images
    ):
        "Evaluate the `output` with the critic then uses `self.gen_loss_func` to evaluate how well the critic was fooled by `output`"
        fake_pred = self.gan_model.critic(output)
        # Stored on the instance so callbacks/metrics can read the latest value
        self.gen_loss = self.gen_loss_func(fake_pred, output, target)
        return self.gen_loss

    def critic(self,
        real_pred, # Critic predictions for real images
        input # Input noise vector to pass into generator
    ):
        "Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`."
        # The generated batch is detached from the graph: only the critic is updated here
        fake = self.gan_model.generator(input).requires_grad_(False)
        fake_pred = self.gan_model.critic(fake)
        self.crit_loss = self.crit_loss_func(real_pred, fake_pred)
        return self.crit_loss
# %% ../../nbs/24_vision.gan.ipynb 24
class AdaptiveLoss(Module):
    "Expand the `target` to match the `output` size before applying `crit`."
    def __init__(self, crit:callable): self.crit = crit
    def forward(self, output:Tensor, target:Tensor):
        # Broadcast the per-item target over every prediction element.
        expanded = target[:,None].expand_as(output).float()
        return self.crit(output, expanded)
# %% ../../nbs/24_vision.gan.ipynb 25
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True):
    "Compute thresholded accuracy after expanding `y_true` to the size of `y_pred`."
    preds = y_pred.sigmoid() if sigmoid else y_pred
    targs = y_true[:,None].expand_as(preds).byte()
    return ((preds > thresh).byte() == targs).float().mean()
# %% ../../nbs/24_vision.gan.ipynb 27
def set_freeze_model(
    m:nn.Module, # Model whose parameters are modified
    rg:bool # Value assigned to every parameter's `requires_grad`
):
    "Set `requires_grad` on every parameter of `m` to `rg` (`False` freezes the model)."
    for param in m.parameters():
        param.requires_grad_(rg)
# %% ../../nbs/24_vision.gan.ipynb 28
class GANTrainer(Callback):
    "Callback to handle GAN Training."
    run_after = TrainEvalCallback
    def __init__(self,
        switch_eval:bool=False, # Whether the model should be set to eval mode when calculating loss
        clip:None|float=None, # How much to clip the weights
        beta:float=0.98, # Exponentially weighted smoothing of the losses `beta`
        gen_first:bool=False, # Whether we start with generator training
        show_img:bool=True, # Whether to show example generated images during training
    ):
        store_attr('switch_eval,clip,gen_first,show_img')
        # Separate smoothed trackers so generator and critic losses are reported independently.
        self.gen_loss,self.crit_loss = AvgSmoothLoss(beta=beta),AvgSmoothLoss(beta=beta)
    def _set_trainable(self):
        "Appropriately set the generator and critic into a trainable or loss evaluation mode based on `self.gen_mode`."
        train_model = self.generator if self.gen_mode else self.critic
        loss_model = self.generator if not self.gen_mode else self.critic
        # Only the side currently being trained gets gradients; the other is frozen.
        set_freeze_model(train_model, True)
        set_freeze_model(loss_model, False)
        if self.switch_eval:
            train_model.train()
            loss_model.eval()
    def before_fit(self):
        "Initialization."
        self.generator,self.critic = self.model.generator,self.model.critic
        self.gen_mode = self.gen_first
        self.switch(self.gen_mode)
        self.crit_losses,self.gen_losses = [],[]
        self.gen_loss.reset() ; self.crit_loss.reset()
        #self.recorder.no_val=True
        #self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
        #self.imgs,self.titles = [],[]
    def before_validate(self):
        "Switch in generator mode for showing results."
        self.switch(gen_mode=True)
    def before_batch(self):
        "Clamp the weights with `self.clip` if it's not None, set the correct input/target."
        if self.training and self.clip is not None:
            # Weight clipping on the critic (as in the original WGAN setup).
            for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
        if not self.gen_mode:
            # In critic mode, swap input/target so the critic receives the real
            # images as input and the original inputs as "target".
            (self.learn.xb,self.learn.yb) = (self.yb,self.xb)
    def after_batch(self):
        "Record `last_loss` in the proper list."
        if not self.training: return
        if self.gen_mode:
            self.gen_loss.accumulate(self.learn)
            self.gen_losses.append(self.gen_loss.value)
            # Keep the latest generated batch around (detached) for display.
            self.last_gen = self.learn.to_detach(self.pred)
        else:
            self.crit_loss.accumulate(self.learn)
            self.crit_losses.append(self.crit_loss.value)
    def before_epoch(self):
        "Put the critic or the generator back to eval if necessary."
        self.switch(self.gen_mode)
    #def after_epoch(self):
    #    "Show a sample image."
    #    if not hasattr(self, 'last_gen') or not self.show_img: return
    #    data = self.learn.data
    #    img = self.last_gen[0]
    #    norm = getattr(data,'norm',False)
    #    if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
    #    img = data.train_ds.y.reconstruct(img)
    #    self.imgs.append(img)
    #    self.titles.append(f'Epoch {epoch}')
    #    pbar.show_imgs(self.imgs, self.titles)
    #    return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
    def switch(self, gen_mode=None):
        "Switch the model and loss function, if `gen_mode` is provided, in the desired mode."
        # `gen_mode=None` toggles; the raw argument is forwarded so model and loss
        # toggle in lockstep with this callback.
        self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
        self._set_trainable()
        self.model.switch(gen_mode)
        self.loss_func.switch(gen_mode)
# %% ../../nbs/24_vision.gan.ipynb 30
class FixedGANSwitcher(Callback):
    "Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator."
    run_after = GANTrainer
    def __init__(self,
        n_crit:int=1, # How many steps of critic training before switching to generator
        n_gen:int=1 # How many steps of generator training before switching to critic
    ):
        store_attr('n_crit,n_gen')
    def before_train(self): self.n_c,self.n_g = 0,0
    def after_batch(self):
        "Switch the model if necessary."
        if not self.training: return
        # Bump the counter of the side that just trained; `quota` may be a fixed
        # int or a callable receiving the other side's counter.
        if self.learn.gan_trainer.gen_mode:
            self.n_g += 1
            quota,done,other = self.n_gen,self.n_g,self.n_c
        else:
            self.n_c += 1
            quota,done,other = self.n_crit,self.n_c,self.n_g
        target = quota if isinstance(quota, int) else quota(other)
        if target == done:
            self.learn.gan_trainer.switch()
            self.n_c,self.n_g = 0,0
# %% ../../nbs/24_vision.gan.ipynb 31
class AdaptiveGANSwitcher(Callback):
    "Switcher that goes back to generator/critic when the loss goes below `gen_thresh`/`crit_thresh`."
    run_after = GANTrainer
    def __init__(self,
        gen_thresh:None|float=None, # Loss threshold for generator
        critic_thresh:None|float=None # Loss threshold for critic
    ):
        store_attr('gen_thresh,critic_thresh')
    def after_batch(self):
        "Switch the model if necessary."
        if not self.training: return
        # Pick the threshold of the side currently training; `None` always switches.
        thresh = self.gen_thresh if self.gan_trainer.gen_mode else self.critic_thresh
        if thresh is None or self.loss < thresh: self.gan_trainer.switch()
# %% ../../nbs/24_vision.gan.ipynb 32
class GANDiscriminativeLR(Callback):
    "`Callback` that handles multiplying the learning rate by `mult_lr` for the critic."
    run_after = GANTrainer
    def __init__(self, mult_lr=5.): self.mult_lr = mult_lr
    def before_batch(self):
        "Multiply the current lr if necessary."
        if not self.learn.gan_trainer.gen_mode and self.training:
            self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']*self.mult_lr)
    def after_batch(self):
        "Put the LR back to its value if necessary."
        # Fix: only undo the scaling when `before_batch` actually applied it.
        # Previously the division ran without the `self.training` guard, so any
        # non-training batch processed in critic mode shrank the lr permanently.
        if not self.learn.gan_trainer.gen_mode and self.training:
            self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']/self.mult_lr)
# %% ../../nbs/24_vision.gan.ipynb 34
class InvisibleTensor(TensorBase):
    "TensorBase but show method does nothing"
    # Used for noise inputs: `show` leaves `ctx` untouched so noise renders nothing.
    def show(self, ctx=None, **kwargs): return ctx
# %% ../../nbs/24_vision.gan.ipynb 35
def generate_noise(
    fn, # Dummy argument so it works with `DataBlock`
    size=100 # Size of returned noise vector
) -> InvisibleTensor:
    "Generate noise vector."
    noise = torch.randn(size)
    return cast(noise, InvisibleTensor)
# %% ../../nbs/24_vision.gan.ipynb 37
@typedispatch
def show_batch(x:InvisibleTensor, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
    "Lay out a grid for generated images, then delegate to the generic implementation."
    if ctxs is None:
        n = min(len(samples), max_n)
        ctxs = get_grid(n, nrows=nrows, ncols=ncols, figsize=figsize)
    return show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
# %% ../../nbs/24_vision.gan.ipynb 38
@typedispatch
def show_results(x:InvisibleTensor, y:TensorImage, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
    "Show up to `max_n` generated images from `outs` on a grid of axes."
    if ctxs is None:
        ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
    shown = []
    for out,ctx,_ in zip(outs.itemgot(0), ctxs, range(max_n)):
        shown.append(out.show(ctx=ctx, **kwargs))
    return shown
# %% ../../nbs/24_vision.gan.ipynb 45
def gan_loss_from_func(
    loss_gen:callable, # A loss function for the generator. Evaluates generator output images and target real images
    loss_crit:callable, # A loss function for the critic. Evaluates predictions of real and fake images.
    weights_gen:None|MutableSequence|tuple=None # Weights for the generator and critic loss function
):
    "Define loss functions for a GAN from `loss_gen` and `loss_crit`."
    def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
        # Adversarial term (critic fooled into predicting 1 on fakes) + weighted pixel loss.
        w_adv,w_pix = ifnone(weights_gen, (1.,1.))
        ones = fake_pred.new_ones(fake_pred.shape[0])
        return w_adv * loss_crit(fake_pred, ones) + w_pix * loss_gen(output, target)
    def _loss_C(real_pred, fake_pred):
        # Average of the critic loss on reals (target 1) and on fakes (target 0).
        real_loss = loss_crit(real_pred, real_pred.new_ones(real_pred.shape[0]))
        fake_loss = loss_crit(fake_pred, fake_pred.new_zeros(fake_pred.shape[0]))
        return (real_loss + fake_loss) / 2
    return _loss_G, _loss_C
# %% ../../nbs/24_vision.gan.ipynb 46
# WGAN-style losses: the generator's loss is the critic's mean score on fakes;
# the critic's loss is the difference between its mean real and mean fake scores.
def _tk_mean(fake_pred, output, target): return fake_pred.mean()
def _tk_diff(real_pred, fake_pred): return real_pred.mean() - fake_pred.mean()
# %% ../../nbs/24_vision.gan.ipynb 47
@delegates()
class GANLearner(Learner):
    "A `Learner` suitable for GANs."
    def __init__(self,
        dls:DataLoaders, # DataLoaders object for GAN data
        generator:nn.Module, # Generator model
        critic:nn.Module, # Critic model
        gen_loss_func:callable, # Generator loss function
        crit_loss_func:callable, # Critic loss function
        switcher:Callback|None=None, # Callback for switching between generator and critic training, defaults to `FixedGANSwitcher`
        gen_first:bool=False, # Whether we start with generator training
        switch_eval:bool=True, # Whether the model should be set to eval mode when calculating loss
        show_img:bool=True, # Whether to show example generated images during training
        clip:None|float=None, # How much to clip the weights
        cbs:Callback|None|MutableSequence=None, # Additional callbacks
        metrics:None|MutableSequence|callable=None, # Metrics
        **kwargs
    ):
        # Bundle both models into one `GANModule` so a single `Learner` drives them,
        # with `GANLoss` dispatching to the right loss for the current mode.
        gan = GANModule(generator, critic)
        loss_func = GANLoss(gen_loss_func, crit_loss_func, gan)
        if switcher is None: switcher = FixedGANSwitcher()
        trainer = GANTrainer(clip=clip, switch_eval=switch_eval, gen_first=gen_first, show_img=show_img)
        cbs = L(cbs) + L(trainer, switcher)
        # Report the smoothed generator/critic losses as metrics.
        metrics = L(metrics) + L(*LossMetrics('gen_loss,crit_loss'))
        super().__init__(dls, gan, loss_func=loss_func, cbs=cbs, metrics=metrics, **kwargs)
    @classmethod
    def from_learners(cls,
        gen_learn:Learner, # A `Learner` object that contains the generator
        crit_learn:Learner, # A `Learner` object that contains the critic
        switcher:Callback|None=None, # Callback for switching between generator and critic training, defaults to `FixedGANSwitcher`
        weights_gen:None|MutableSequence|tuple=None, # Weights for the generator and critic loss function
        **kwargs
    ):
        "Create a GAN from `learn_gen` and `learn_crit`."
        # Combine each learner's own loss function into GAN-style losses.
        losses = gan_loss_from_func(gen_learn.loss_func, crit_learn.loss_func, weights_gen=weights_gen)
        return cls(gen_learn.dls, gen_learn.model, crit_learn.model, *losses, switcher=switcher, **kwargs)
    @classmethod
    def wgan(cls,
        dls:DataLoaders, # DataLoaders object for GAN data
        generator:nn.Module, # Generator model
        critic:nn.Module, # Critic model
        switcher:Callback|None=None, # Callback for switching between generator and critic training, defaults to `FixedGANSwitcher(n_crit=5, n_gen=1)`
        clip:None|float=0.01, # How much to clip the weights
        switch_eval:bool=False, # Whether the model should be set to eval mode when calculating loss
        **kwargs
    ):
        "Create a [WGAN](https://arxiv.org/abs/1701.07875) from `dls`, `generator` and `critic`."
        # WGAN defaults: 5 critic steps per generator step, weights clipped at 0.01.
        if switcher is None: switcher = FixedGANSwitcher(n_crit=5, n_gen=1)
        return cls(dls, generator, critic, _tk_mean, _tk_diff, switcher=switcher, clip=clip, switch_eval=switch_eval, **kwargs)
# Re-apply `delegates` so the classmethods also expose `__init__`'s keyword arguments.
GANLearner.from_learners = delegates(to=GANLearner.__init__)(GANLearner.from_learners)
GANLearner.wgan = delegates(to=GANLearner.__init__)(GANLearner.wgan)
| 19,560 | 45.463183 | 150 | py |
fastai | fastai-master/fastai/vision/data.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/08_vision.data.ipynb.
# %% ../../nbs/08_vision.data.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
from .core import *
import types
# %% auto 0
__all__ = ['PointBlock', 'BBoxBlock', 'get_grid', 'clip_remove_empty', 'bb_pad', 'show_batch', 'ImageBlock', 'MaskBlock',
'BBoxLblBlock', 'ImageDataLoaders', 'SegmentationDataLoaders']
# %% ../../nbs/08_vision.data.ipynb 7
@delegates(subplots)
def get_grid(
    n:int, # Number of axes in the returned grid
    nrows:int=None, # Number of rows in the returned grid, defaulting to `int(math.sqrt(n))`
    ncols:int=None, # Number of columns in the returned grid, defaulting to `ceil(n/rows)`
    figsize:tuple=None, # Width, height in inches of the returned figure
    double:bool=False, # Whether to double the number of columns and `n`
    title:str=None, # If passed, title set to the figure
    return_fig:bool=False, # Whether to return the figure created by `subplots`
    flatten:bool=True, # Whether to flatten the matplot axes such that they can be iterated over with a single loop
    **kwargs,
) -> (plt.Figure, plt.Axes): # Returns just `axs` by default, and (`fig`, `axs`) if `return_fig` is set to True
    "Return a grid of `n` axes, `rows` by `cols`"
    # Infer whichever of nrows/ncols is missing; default to a roughly square grid.
    if nrows:
        ncols = ncols or int(np.ceil(n/nrows))
    elif ncols:
        nrows = nrows or int(np.ceil(n/ncols))
    else:
        nrows = int(math.sqrt(n))
        ncols = int(np.ceil(n/nrows))
    if double:
        ncols *= 2
        n *= 2
    fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
    if flatten:
        # Keep the first `n` axes and hide the unused remainder of the grid.
        flat = axs.flatten()
        for ax in flat[n:]: ax.set_axis_off()
        axs = list(flat[:n])
    if title is not None: fig.suptitle(title, weight='bold', size=14)
    return (fig,axs) if return_fig else axs
# %% ../../nbs/08_vision.data.ipynb 9
def clip_remove_empty(
    bbox:TensorBBox, # Coordinates of bounding boxes
    label:TensorMultiCategory # Labels of the bounding boxes
):
    "Clip bounding boxes with image border and remove empty boxes along with corresponding labels"
    # Coordinates live in [-1, 1]; clamp first, then drop boxes with zero area.
    bbox = torch.clamp(bbox, -1, 1)
    widths  = bbox[...,2] - bbox[...,0]
    heights = bbox[...,3] - bbox[...,1]
    keep = ~(widths * heights <= 0.)
    return (bbox[keep], label[TensorBase(keep)])
# %% ../../nbs/08_vision.data.ipynb 12
def bb_pad(
    samples:list, # List of 3-tuples like (image, bounding_boxes, labels)
    pad_idx=0 # Label that will be used to pad each list of labels
):
    "Function that collects `samples` of labelled bboxes and adds padding with `pad_idx`."
    # First clean each sample, then pad everything to the longest box list.
    samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
    max_len = max(len(s[2]) for s in samples)
    def _pad_one(img, bbox, lbl):
        bbox = torch.cat([bbox, bbox.new_zeros(max_len - bbox.shape[0], 4)])
        lbl  = torch.cat([lbl, lbl.new_zeros(max_len - lbl.shape[0]) + pad_idx])
        return img,bbox,lbl
    return [_pad_one(*s) for s in samples]
# %% ../../nbs/08_vision.data.ipynb 16
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
    "Lay out a grid of axes for an image batch, then delegate to the generic implementation."
    if ctxs is None:
        ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
    return show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
# %% ../../nbs/08_vision.data.ipynb 17
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
    "Show image-to-image samples: inputs on even axes, targets on odd axes."
    if ctxs is None:
        ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True)
    for part in range(2):
        shown = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(part), ctxs[part::2], range(max_n))]
        ctxs[part::2] = shown
    return ctxs
# %% ../../nbs/08_vision.data.ipynb 20
def ImageBlock(cls:PILBase=PILImage):
    "A `TransformBlock` for images of `cls`"
    # `cls.create` opens each image; `IntToFloatTensor` converts batches to float.
    return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
# %% ../../nbs/08_vision.data.ipynb 21
def MaskBlock(
    codes:list=None # Vocab labels for segmentation masks
):
    "A `TransformBlock` for segmentation masks, potentially with `codes`"
    # `AddMaskCodes` attaches the class vocab to each mask so it travels with the data.
    return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
# %% ../../nbs/08_vision.data.ipynb 22
# Pre-built blocks: points and bboxes both use `PointScaler` so coordinates are
# rescaled with the image; bboxes additionally pad each batch via `bb_pad`.
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
# %% ../../nbs/08_vision.data.ipynb 25
def BBoxLblBlock(
    vocab:list=None, # Vocab labels for bounding boxes
    add_na:bool=True # Add NaN as a background class
):
    "A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
    # `add_na=True` reserves an extra background/NA category (used when padding batches).
    return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
# %% ../../nbs/08_vision.data.ipynb 28
class ImageDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
                    batch_tfms=None, img_cls=PILImage, **kwargs):
        "Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
        # Split by parent folder unless a random `valid_pct` split is requested.
        splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
        # With a random split, scan everything; otherwise only the two named folders.
        get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
        dblock = DataBlock(blocks=(ImageBlock(img_cls), CategoryBlock(vocab=vocab)),
                           get_items=get_items,
                           splitter=splitter,
                           get_y=parent_label,
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, path, path=path, **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None,
                       img_cls=PILImage, **kwargs):
        "Create from list of `fnames` in `path`s with `label_func`"
        dblock = DataBlock(blocks=(ImageBlock(img_cls), CategoryBlock),
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           get_y=label_func,
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, fnames, path=path, **kwargs)
    @classmethod
    def from_name_func(cls,
        path:str|Path, # Set the default path to a directory that a `Learner` can use to save files like models
        fnames:list, # A list of `os.Pathlike`'s to individual image files
        label_func:callable, # A function that receives a string (the file name) and outputs a label
        **kwargs
    ) -> DataLoaders:
        "Create from the name attrs of `fnames` in `path`s with `label_func`"
        if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
            # Lambdas can't be pickled for the worker processes on Windows:
            # https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
            raise ValueError("label_func couldn't be lambda function on Windows")
        # Apply `label_func` to each file's `.name` attribute rather than the full path.
        f = using_attr(label_func, 'name')
        return cls.from_path_func(path, fnames, f, **kwargs)
    @classmethod
    def from_path_re(cls, path, fnames, pat, **kwargs):
        "Create from list of `fnames` in `path`s with re expression `pat`"
        return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_name_re(cls, path, fnames, pat, **kwargs):
        "Create from the name attrs of `fnames` in `path`s with re expression `pat`"
        return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
                y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, img_cls=PILImage, **kwargs):
        "Create from `df` using `fn_col` and `label_col`"
        # Prefix each file name with `path` (and `folder` when given).
        pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
        if y_block is None:
            # Multi-label when there are several label columns or a delimiter.
            is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
            y_block = MultiCategoryBlock if is_multi else CategoryBlock
        splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
        dblock = DataBlock(blocks=(ImageBlock(img_cls), y_block),
                           get_x=ColReader(fn_col, pref=pref, suff=suff),
                           get_y=ColReader(label_col, label_delim=label_delim),
                           splitter=splitter,
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, df, path=path, **kwargs)
    @classmethod
    def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
        "Create from `path/csv_fname` using `fn_col` and `label_col`"
        df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
        return cls.from_df(df, path=path, **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
                   img_cls=PILImage, **kwargs):
        "Create from list of `fnames` and `labels` in `path`"
        if y_block is None:
            # Infer the target block from the first label: list -> multi-category,
            # float -> regression, anything else -> single category.
            y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
                RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
        dblock = DataBlock.from_columns(blocks=(ImageBlock(img_cls), y_block),
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
# Re-apply `delegates` so each factory also exposes the kwargs of the method it forwards to.
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
# %% ../../nbs/08_vision.data.ipynb 62
class SegmentationDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None,
                        img_cls=PILImage, **kwargs):
        "Create from list of `fnames` in `path`s with `label_func`."
        # Pair each image with the mask returned by `label_func`.
        blocks = (ImageBlock(img_cls), MaskBlock(codes=codes))
        dblock = DataBlock(blocks=blocks,
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           get_y=label_func,
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, fnames, path=path, **kwargs)
| 11,758 | 52.45 | 137 | py |
fastai | fastai-master/fastai/vision/models/tvm.py | from torchvision.models import *
# After the star-import above pulled in everything from `torchvision.models`,
# scrub this module's namespace: drop re-exported submodules and the callables
# coming from the weight-registry module `torchvision.models._api`, keeping
# only the model builders themselves.
import types as _t
_g = globals()
for _k, _v in list(_g.items()):
    if (
        isinstance(_v, _t.ModuleType) and _v.__name__.startswith("torchvision.models")
    ) or (callable(_v) and _v.__module__ == "torchvision.models._api"):
        del _g[_k]
# Remove the loop temporaries so they don't leak from the module either.
del _k, _v, _g, _t
| 307 | 24.666667 | 86 | py |
fastai | fastai-master/fastai/vision/models/xresnet.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../../nbs/11_vision.models.xresnet.ipynb.
# %% ../../../nbs/11_vision.models.xresnet.ipynb 2
from __future__ import annotations
from ...torch_basics import *
try: from torchvision.models.utils import load_state_dict_from_url
except ModuleNotFoundError: from torch.hub import load_state_dict_from_url
# %% auto 0
__all__ = ['se_kwargs1', 'se_kwargs2', 'se_kwargs3', 'g0', 'g1', 'g2', 'g3', 'init_cnn', 'XResNet', 'xresnet18', 'xresnet34',
'xresnet50', 'xresnet101', 'xresnet152', 'xresnet18_deep', 'xresnet34_deep', 'xresnet50_deep',
'xresnet18_deeper', 'xresnet34_deeper', 'xresnet50_deeper', 'xse_resnet18', 'xse_resnext18', 'xresnext18',
'xse_resnet34', 'xse_resnext34', 'xresnext34', 'xse_resnet50', 'xse_resnext50', 'xresnext50',
'xse_resnet101', 'xse_resnext101', 'xresnext101', 'xse_resnet152', 'xsenet154', 'xse_resnext18_deep',
'xse_resnext34_deep', 'xse_resnext50_deep', 'xse_resnext18_deeper', 'xse_resnext34_deeper',
'xse_resnext50_deeper']
# %% ../../../nbs/11_vision.models.xresnet.ipynb 5
def init_cnn(m):
    "Recursively initialize `m`: zero every bias, kaiming-normal conv/linear weights."
    bias = getattr(m, 'bias', None)
    if bias is not None: nn.init.constant_(bias, 0)
    if isinstance(m, (nn.Conv1d,nn.Conv2d,nn.Conv3d,nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
    for child in m.children(): init_cnn(child)
# %% ../../../nbs/11_vision.models.xresnet.ipynb 6
class XResNet(nn.Sequential):
    "A flexible ResNet variant ('xresnet') with a 3-conv stem, built as one `nn.Sequential`."
    @delegates(ResBlock)
    def __init__(self, block, expansion, layers, p=0.0, c_in=3, n_out=1000, stem_szs=(32,32,64),
                 widen=1.0, sa=False, act_cls=defaults.activation, ndim=2, ks=3, stride=2, **kwargs):
        store_attr('block,expansion,act_cls,ndim,ks')
        if ks % 2 == 0: raise Exception('kernel size has to be odd!')
        # Stem: three convs, only the first one downsamples.
        stem_szs = [c_in, *stem_szs]
        stem = [ConvLayer(stem_szs[i], stem_szs[i+1], ks=ks, stride=stride if i==0 else 1,
                          act_cls=act_cls, ndim=ndim)
                for i in range(3)]
        # Per-stage widths (scaled by `widen`); extra stages beyond four stay at 256.
        block_szs = [int(o*widen) for o in [64,128,256,512] +[256]*(len(layers)-4)]
        block_szs = [64//expansion] + block_szs
        blocks = self._make_blocks(layers, block_szs, sa, stride, **kwargs)
        super().__init__(
            *stem, MaxPool(ks=ks, stride=stride, padding=ks//2, ndim=ndim),
            *blocks,
            AdaptiveAvgPool(sz=1, ndim=ndim), Flatten(), nn.Dropout(p),
            nn.Linear(block_szs[-1]*expansion, n_out),
        )
        init_cnn(self)
    def _make_blocks(self, layers, block_szs, sa, stride, **kwargs):
        # One stage per entry of `layers`; the first stage keeps stride 1, and
        # self-attention (if requested) is placed on the 4th-from-last stage.
        return [self._make_layer(ni=block_szs[i], nf=block_szs[i+1], blocks=l,
                                 stride=1 if i==0 else stride, sa=sa and i==len(layers)-4, **kwargs)
                for i,l in enumerate(layers)]
    def _make_layer(self, ni, nf, blocks, stride, sa, **kwargs):
        # Within a stage, only the first block downsamples; self-attention goes on the last.
        return nn.Sequential(
            *[self.block(self.expansion, ni if i==0 else nf, nf, stride=stride if i==0 else 1,
                         sa=sa and i==(blocks-1), act_cls=self.act_cls, ndim=self.ndim, ks=self.ks, **kwargs)
              for i in range(blocks)])
# %% ../../../nbs/11_vision.models.xresnet.ipynb 7
def _xresnet(pretrained, expansion, layers, **kwargs):
    "Build an `XResNet` of `ResBlock`s; optionally load the pretrained xresnet50 weights."
    # TODO pretrain all sizes. Currently will fail with non-xrn50
    url = 'https://s3.amazonaws.com/fast-ai-modelzoo/xrn50_940.pth'
    res = XResNet(ResBlock, expansion, layers, **kwargs)
    if pretrained: res.load_state_dict(load_state_dict_from_url(url, map_location='cpu')['model'], strict=False)
    return res
# expansion=1 for basic-block depths (18/34), 4 for bottleneck depths (50/101/152).
def xresnet18 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2, 2, 2, 2], **kwargs)
def xresnet34 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3, 4, 6, 3], **kwargs)
def xresnet50 (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 6, 3], **kwargs)
def xresnet101(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 23, 3], **kwargs)
def xresnet152(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 8, 36, 3], **kwargs)
# "deep"/"deeper" variants append extra single-block stages to the standard depths.
def xresnet18_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,2,2,1,1], **kwargs)
def xresnet34_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1], **kwargs)
def xresnet50_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1], **kwargs)
def xresnet18_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,1,1,1,1,1,1], **kwargs)
def xresnet34_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1,1,1], **kwargs)
def xresnet50_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1,1,1], **kwargs)
# %% ../../../nbs/11_vision.models.xresnet.ipynb 8
# Squeeze-and-excitation presets: plain SE (groups=1), SE-ResNeXt (groups=32),
# and ResNeXt without SE (reduction=0 disables the SE branch).
se_kwargs1 = dict(groups=1 , reduction=16)
se_kwargs2 = dict(groups=32, reduction=16)
se_kwargs3 = dict(groups=32, reduction=0)
# Per-stage layer counts for the 18, 34/50, 101 and 152 depth variants.
g0 = [2,2,2,2]
g1 = [3,4,6,3]
g2 = [3,4,23,3]
g3 = [3,8,36,3]
# %% ../../../nbs/11_vision.models.xresnet.ipynb 9
# Factories pairing a block type (SE / SE-ResNeXt / ResNeXt preset) with a depth
# preset (`g0`..`g3`). `pretrained` is accepted for API symmetry only: no
# pretrained weights are wired up for these variants.
def xse_resnet18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g0, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g2, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet152(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g3, n_out=n_out, **se_kwargs1, **kwargs)
def xsenet154(n_out=1000, pretrained=False, **kwargs):
    # Bug fix: `XResNet.__init__` is (block, expansion, layers, ...); the
    # expansion argument was missing, so this call raised a TypeError whenever
    # invoked. SENet-154 uses bottleneck blocks, i.e. expansion 4. `**kwargs`
    # is now also forwarded for consistency with the sibling factories
    # (backward compatible: it was previously silently dropped).
    return XResNet(SEBlock, 4, g3, groups=64, reduction=16, p=0.2, n_out=n_out, **kwargs)
def xse_resnext18_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext18_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [2,2,1,1,1,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
| 7,766 | 69.609091 | 156 | py |
fastai | fastai-master/fastai/vision/models/unet.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../../nbs/15a_vision.models.unet.ipynb.
# %% ../../../nbs/15a_vision.models.unet.ipynb 1
from __future__ import annotations
from ...torch_basics import *
from ...callback.hook import *
# %% auto 0
__all__ = ['UnetBlock', 'ResizeToOrig', 'DynamicUnet']
# %% ../../../nbs/15a_vision.models.unet.ipynb 5
def _get_sz_change_idxs(sizes):
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sz_chg_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0])
return sz_chg_idxs
# %% ../../../nbs/15a_vision.models.unet.ipynb 7
class UnetBlock(Module):
    "A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
    @delegates(ConvLayer.__init__)
    def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation,
                 self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
        self.hook = hook  # hook holding the encoder activation used as skip connection
        self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type)
        self.bn = BatchNorm(x_in_c)
        # Channels after concatenating the upsampled input with the skip connection.
        ni = up_in_c//2 + x_in_c
        nf = ni if final_div else ni//2
        self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, norm_type=norm_type, **kwargs)
        self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, norm_type=norm_type,
                               xtra=SelfAttention(nf) if self_attention else None, **kwargs)
        self.relu = act_cls()
        apply_init(nn.Sequential(self.conv1, self.conv2), init)
    def forward(self, up_in):
        s = self.hook.stored
        up_out = self.shuf(up_in)
        ssh = s.shape[-2:]
        # Resize the upsampled features if they don't match the skip connection's size.
        if ssh != up_out.shape[-2:]:
            up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
        cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
        return self.conv2(self.conv1(cat_x))
# %% ../../../nbs/15a_vision.models.unet.ipynb 8
class ResizeToOrig(Module):
    "Resize `x` back to the spatial size of the original input stored in `x.orig`."
    def __init__(self, mode='nearest'): self.mode = mode
    def forward(self, x):
        target_sz = x.orig.shape[-2:]
        if target_sz != x.shape[-2:]:
            x = F.interpolate(x, target_sz, mode=self.mode)
        return x
# %% ../../../nbs/15a_vision.models.unet.ipynb 9
class DynamicUnet(SequentialEx):
    "Create a U-Net from a given architecture."
    def __init__(self, encoder, n_out, img_size, blur=False, blur_final=True, self_attention=False,
                 y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation,
                 init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
        imsize = img_size
        # Probe the encoder with a dummy pass to find where activation sizes change.
        sizes = model_sizes(encoder, size=imsize)
        sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
        # Hook those layers so their outputs can serve as skip connections.
        self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
        x = dummy_eval(encoder, imsize).detach()
        ni = sizes[-1][1]
        middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, **kwargs),
                                    ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, **kwargs)).eval()
        x = middle_conv(x)
        layers = [encoder, BatchNorm(ni), nn.ReLU(), middle_conv]
        # One `UnetBlock` per size change, tracking channel counts via the dummy tensor.
        for i,idx in enumerate(sz_chg_idxs):
            not_final = i!=len(sz_chg_idxs)-1
            up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i==len(sz_chg_idxs)-3)
            unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa,
                                   act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval()
            layers.append(unet_block)
            x = unet_block(x)
        ni = x.shape[1]
        # Upsample once more if the encoder's first activation is smaller than the input.
        if imsize != sizes[0][-2:]: layers.append(PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))
        layers.append(ResizeToOrig())
        if last_cross:
            # Concatenate the raw network input itself as a final skip connection.
            layers.append(MergeLayer(dense=True))
            ni += in_channels(encoder)
            layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, **kwargs))
        layers += [ConvLayer(ni, n_out, ks=1, act_cls=None, norm_type=norm_type, **kwargs)]
        apply_init(nn.Sequential(layers[3], layers[-2]), init)
        #apply_init(nn.Sequential(layers[2]), init)
        if y_range is not None: layers.append(SigmoidRange(*y_range))
        layers.append(ToTensorBase())
        super().__init__(*layers)
    def __del__(self):
        # Remove the forward hooks when the model is garbage collected.
        if hasattr(self, "sfs"): self.sfs.remove()
| 4,724 | 47.71134 | 118 | py |
fastai | fastai-master/fastai/tabular/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/40_tabular.core.ipynb.
# %% ../../nbs/40_tabular.core.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
# %% auto 0
__all__ = ['make_date', 'add_datepart', 'add_elapsed_times', 'cont_cat_split', 'df_shrink_dtypes', 'df_shrink', 'Tabular',
'TabularPandas', 'TabularProc', 'Categorify', 'FillStrategy', 'FillMissing', 'ReadTabBatch', 'show_batch',
'TabDataLoader']
# %% ../../nbs/40_tabular.core.ipynb 4
pd.set_option('mode.chained_assignment','raise')
# %% ../../nbs/40_tabular.core.ipynb 7
def make_date(df, date_field):
    "Make sure `df[date_field]` is of the right date type."
    col_dtype = df[date_field].dtype
    # Timezone-aware columns already hold datetimes, so treat them as datetime64
    if isinstance(col_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        col_dtype = np.datetime64
    if not np.issubdtype(col_dtype, np.datetime64):
        df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
# %% ../../nbs/40_tabular.core.ipynb 9
def add_datepart(df, field_name, prefix=None, drop=True, time=False):
    "Helper function that adds columns relevant to a date in the column `field_name` of `df`."
    make_date(df, field_name)
    field = df[field_name]
    prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
    attrs = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',
             'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
    if time: attrs += ['Hour', 'Minute', 'Second']
    # `Series.dt.week` was removed in recent pandas, so prefer `isocalendar()` when available
    if hasattr(field.dt, 'isocalendar'):
        week = field.dt.isocalendar().week.astype(field.dt.day.dtype)
    else:
        week = field.dt.week
    for attr in attrs:
        df[prefix + attr] = week if attr == 'Week' else getattr(field.dt, attr.lower())
    # Elapsed seconds since the epoch; NaT rows become NaN
    mask = ~field.isna()
    df[prefix + 'Elapsed'] = np.where(mask, field.values.astype(np.int64) // 10 ** 9, np.nan)
    if drop: df.drop(field_name, axis=1, inplace=True)
    return df
# %% ../../nbs/40_tabular.core.ipynb 15
def _get_elapsed(df,field_names, date_field, base_field, prefix):
    "For each event column in `field_names`, add `prefix+f`: days elapsed since the last row where the event was truthy, within each `base_field` group."
    for f in field_names:
        day1 = np.timedelta64(1, 'D')
        # `np.datetime64()` with no argument is the NaT sentinel: before any event fires,
        # the elapsed difference is NaT
        last_date,last_base,res = np.datetime64(),None,[]
        for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values):
            # Reset the "last seen" date whenever we enter a new base group
            if last_base is None or b != last_base:
                last_date,last_base = np.datetime64(),b
            if v: last_date = d
            res.append(((d-last_date).astype('timedelta64[D]') / day1))
        df[prefix + f] = res
    return df
# %% ../../nbs/40_tabular.core.ipynb 16
def add_elapsed_times(df, field_names, date_field, base_field):
    "Add in `df` for each event in `field_names` the elapsed time according to `date_field` grouped by `base_field`"
    field_names = list(L(field_names))
    #Make sure date_field is a date and base_field a bool
    df[field_names] = df[field_names].astype('bool')
    make_date(df, date_field)
    # Work on a sorted copy: forward pass gives days since the last event ('After'),
    # reverse pass gives days until the next event ('Before')
    work_df = df[field_names + [date_field, base_field]]
    work_df = work_df.sort_values([base_field, date_field])
    work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After')
    work_df = work_df.sort_values([base_field, date_field], ascending=[True, False])
    work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before')
    for a in ['After' + f for f in field_names] + ['Before' + f for f in field_names]:
        work_df[a] = work_df[a].fillna(0).astype(int)
    # Rolling 7-row event counts per group, once in each direction ('_bw' and '_fw')
    for a,s in zip([True, False], ['_bw', '_fw']):
        work_df = work_df.set_index(date_field)
        tmp = (work_df[[base_field] + field_names].sort_index(ascending=a)
               .groupby(base_field).rolling(7, min_periods=1).sum())
        if base_field in tmp: tmp.drop(base_field, axis=1,inplace=True)
        tmp.reset_index(inplace=True)
        work_df.reset_index(inplace=True)
        work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s])
    work_df.drop(field_names, axis=1, inplace=True)
    return df.merge(work_df, 'left', [date_field, base_field])
# %% ../../nbs/40_tabular.core.ipynb 18
def cont_cat_split(df, max_card=20, dep_var=None):
    "Helper function that returns column names of cont and cat variables from given `df`."
    cont_names, cat_names = [], []
    deps = L(dep_var)
    for label in df.columns:
        # Dependent variables are neither continuous nor categorical features
        if label in deps: continue
        dtype = df[label].dtype
        high_card_int = (pd.api.types.is_integer_dtype(dtype) and
                         df[label].unique().shape[0] > max_card)
        if high_card_int or pd.api.types.is_float_dtype(dtype):
            cont_names.append(label)
        else:
            cat_names.append(label)
    return cont_names, cat_names
# %% ../../nbs/40_tabular.core.ipynb 26
def df_shrink_dtypes(df, skip=[], obj2cat=True, int2uint=False):
    "Return any possible smaller data types for DataFrame columns. Allows `object`->`category`, `int`->`uint`, and exclusion."
    # dtypes that are never shrunk, plus user-requested column exclusions
    excl_dtypes, skip = {'category','datetime64[ns]','bool'}, set(skip)
    # Candidate target dtypes (smallest first) with the value range each one can hold
    typemap = {
        'int'  : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.int8, np.int16, np.int32, np.int64)],
        'uint' : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.uint8, np.uint16, np.uint32, np.uint64)],
        'float': [(np.dtype(x), np.finfo(x).min, np.finfo(x).max) for x in (np.float32, np.float64, np.longdouble)],
    }
    if obj2cat: typemap['object'] = 'category'  # categorify object columns (may not always save space)
    else: excl_dtypes.add('object')
    new_dtypes = {}
    for col, old_t in df.dtypes.items():
        if old_t.name in excl_dtypes or col in skip: continue
        candidates = next((v for k,v in typemap.items() if old_t.name.startswith(k)), None)
        if isinstance(candidates, list):
            if int2uint and candidates == typemap['int'] and df[col].min() >= 0: candidates = typemap['uint']
            lo, hi = df[col].min(), df[col].max()
            # Pick the smallest candidate dtype whose range covers the column's values
            new_t = next((dt for dt,tmin,tmax in candidates if tmin <= lo and tmax >= hi), None)
            if new_t == old_t: new_t = None
        else:
            new_t = candidates if isinstance(candidates, str) else None
        if new_t: new_dtypes[col] = new_t
    return new_dtypes
# %% ../../nbs/40_tabular.core.ipynb 33
def df_shrink(df, skip=[], obj2cat=True, int2uint=False):
    "Reduce DataFrame memory usage, by casting to smaller types returned by `df_shrink_dtypes()`."
    # Returns a new DataFrame; `df` itself is left untouched
    new_dtypes = df_shrink_dtypes(df, skip, obj2cat=obj2cat, int2uint=int2uint)
    return df.astype(new_dtypes)
# %% ../../nbs/40_tabular.core.ipynb 48
class _TabIloc:
    "Get/set rows by iloc and cols by name"
    def __init__(self,to): self.to = to
    def __getitem__(self, idxs):
        df = self.to.items
        if isinstance(idxs,tuple):
            rows,cols = idxs
            # Translate column names into positional selectors usable by DataFrame.iloc
            cols = df.columns.isin(cols) if is_listy(cols) else df.columns.get_loc(cols)
        else: rows,cols = idxs,slice(None)
        # Wrap the selected sub-frame in a new object of the same Tabular type
        return self.to.new(df.iloc[rows, cols])
# %% ../../nbs/40_tabular.core.ipynb 49
class Tabular(CollBase, GetAttr, FilteredBase):
    "A `DataFrame` wrapper that knows which cols are cont/cat/y, and returns rows in `__getitem__`"
    _default,with_cont='procs',True
    def __init__(self, df, procs=None, cat_names=None, cont_names=None, y_names=None, y_block=None, splits=None,
                 do_setup=True, device=None, inplace=False, reduce_memory=True):
        if inplace and splits is not None and pd.options.mode.chained_assignment is not None:
            warn("Using inplace with splits will trigger a pandas error. Set `pd.options.mode.chained_assignment=None` to avoid it.")
        if not inplace: df = df.copy()
        if reduce_memory: df = df_shrink(df)
        # Reorder rows so the train split comes first, followed by the valid split
        if splits is not None: df = df.iloc[sum(splits, [])]
        # Re-wrap `dataloaders` so its signature exposes the dl type's keyword arguments
        self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
        super().__init__(df)
        self.y_names,self.device = L(y_names),device
        if y_block is None and self.y_names:
            # Make ys categorical if they're not numeric
            ys = df[self.y_names]
            if len(ys.select_dtypes(include='number').columns)!=len(ys.columns): y_block = CategoryBlock()
            else: y_block = RegressionBlock()
        if y_block is not None and do_setup:
            # Target-encoding transforms are appended to the user-supplied procs
            if callable(y_block): y_block = y_block()
            procs = L(procs) + y_block.type_tfms
        self.cat_names,self.cont_names,self.procs = L(cat_names),L(cont_names),Pipeline(procs)
        # `split` is the index at which the valid rows start
        self.split = len(df) if splits is None else len(splits[0])
        if do_setup: self.setup()
    def new(self, df, inplace=False):
        # Same procs/column config, but no re-setup and no memory shrinking
        return type(self)(df, do_setup=False, reduce_memory=False, y_block=TransformBlock(), inplace=inplace,
                          **attrdict(self, 'procs','cat_names','cont_names','y_names', 'device'))
    def subset(self, i): return self.new(self.items[slice(0,self.split) if i==0 else slice(self.split,len(self))])
    def copy(self): self.items = self.items.copy(); return self
    def decode(self): return self.procs.decode(self)
    def decode_row(self, row): return self.new(pd.DataFrame(row).T).decode().items.iloc[0]
    def show(self, max_n=10, **kwargs): display_df(self.new(self.all_cols[:max_n]).decode().items)
    def setup(self): self.procs.setup(self)
    def process(self): self.procs(self)
    # The following are converted to read-only properties by the `properties(...)` call below
    def loc(self): return self.items.loc
    def iloc(self): return _TabIloc(self)
    def targ(self): return self.items[self.y_names]
    def x_names (self): return self.cat_names + self.cont_names
    def n_subsets(self): return 2
    def y(self): return self[self.y_names[0]]
    def new_empty(self): return self.new(pd.DataFrame({}, columns=self.items.columns))
    def to_device(self, d=None):
        self.device = d
        return self
    def all_col_names (self):
        # Only include y columns that are actually present (they may be absent at inference)
        ys = [n for n in self.y_names if n in self.items.columns]
        return self.x_names + self.y_names if len(ys) == len(self.y_names) else self.x_names
properties(Tabular,'loc','iloc','targ','all_col_names','n_subsets','x_names','y')
# %% ../../nbs/40_tabular.core.ipynb 51
class TabularPandas(Tabular):
    "A `Tabular` object with transforms"
    def transform(self, cols, f, all_col=True):
        "Apply `f` to `cols`; with `all_col=False`, columns absent from `items` are skipped"
        if not all_col:
            cols = [c for c in cols if c in self.items.columns]
        if len(cols) == 0: return
        self[cols] = self[cols].transform(f)
# %% ../../nbs/40_tabular.core.ipynb 52
def _add_prop(cls, nm):
@property
def f(o): return o[list(getattr(o,nm+'_names'))]
@f.setter
def fset(o, v): o[getattr(o,nm+'_names')] = v
setattr(cls, nm+'s', f)
setattr(cls, nm+'s', fset)
# Install the column-view properties (`to.cats`, `to.conts`, `to.ys`, `to.xs`,
# `to.all_cols`) that read/write the columns listed in the matching `*_names`.
_add_prop(Tabular, 'cat')
_add_prop(Tabular, 'cont')
_add_prop(Tabular, 'y')
_add_prop(Tabular, 'x')
_add_prop(Tabular, 'all_col')
# %% ../../nbs/40_tabular.core.ipynb 56
class TabularProc(InplaceTransform):
    "Base class to write a non-lazy tabular processor for dataframes"
    def setup(self, items=None, train_setup=False): #TODO: properly deal with train_setup
        # Fit on the train subset only, then immediately apply to all items
        super().setup(getattr(items,'train',items), train_setup=False)
        # Procs are called as soon as data is available
        return self(items.items if isinstance(items,Datasets) else items)
    @property
    def name(self): return f"{super().name} -- {getattr(self,'__stored_args__',{})}"
# %% ../../nbs/40_tabular.core.ipynb 58
def _apply_cats (voc, add, c):
if not is_categorical_dtype(c):
return pd.Categorical(c, categories=voc[c.name][add:]).codes+add
return c.cat.codes+add #if is_categorical_dtype(c) else c.map(voc[c.name].o2i)
def _decode_cats(voc, c): return c.map(dict(enumerate(voc[c.name].items)))
# %% ../../nbs/40_tabular.core.ipynb 59
class Categorify(TabularProc):
    "Transform the categorical variables to something similar to `pd.Categorical`"
    order = 1
    def setups(self, to):
        # One CategoryMap per categorical column (note: `n in to.cat_names` is always
        # True inside this comprehension, so every x vocab gets the na slot)
        store_attr(classes={n:CategoryMap(to.iloc[:,n].items, add_na=(n in to.cat_names)) for n in to.cat_names}, but='to')
    def encodes(self, to): to.transform(to.cat_names, partial(_apply_cats, self.classes, 1))
    def decodes(self, to): to.transform(to.cat_names, partial(_decode_cats, self.classes))
    def __getitem__(self,k): return self.classes[k]
# %% ../../nbs/40_tabular.core.ipynb 60
@Categorize
def setups(self, to:Tabular):
    # Build the target vocab from the training split when none was provided
    if len(to.y_names) > 0:
        if self.vocab is None:
            self.vocab = CategoryMap(getattr(to, 'train', to).iloc[:,to.y_names[0]].items, strict=True)
        else:
            self.vocab = CategoryMap(self.vocab, sort=False, add_na=self.add_na)
        self.c = len(self.vocab)
    return self(to)
@Categorize
def encodes(self, to:Tabular):
    # Targets use offset 0 (no slot sliced off the vocab); absent columns are skipped
    to.transform(to.y_names, partial(_apply_cats, {n: self.vocab for n in to.y_names}, 0), all_col=False)
    return to
@Categorize
def decodes(self, to:Tabular):
    to.transform(to.y_names, partial(_decode_cats, {n: self.vocab for n in to.y_names}), all_col=False)
    return to
# %% ../../nbs/40_tabular.core.ipynb 74
@Normalize
def setups(self, to:Tabular):
    # Statistics come from the training split only; +1e-7 guards against zero std
    store_attr(but='to', means=dict(getattr(to, 'train', to).conts.mean()),
               stds=dict(getattr(to, 'train', to).conts.std(ddof=0)+1e-7))
    return self(to)
@Normalize
def encodes(self, to:Tabular):
    # Standardize each continuous column in place
    to.conts = (to.conts-self.means) / self.stds
    return to
@Normalize
def decodes(self, to:Tabular):
    # Invert the standardization
    to.conts = (to.conts*self.stds ) + self.means
    return to
# %% ../../nbs/40_tabular.core.ipynb 79
class FillStrategy:
    "Namespace containing the various filling strategies."
    # Each strategy receives the column `c` and the configured `fill` value;
    # only `constant` actually uses `fill`.
    def median (c,fill): return c.median()
    def constant(c,fill): return fill
    def mode (c,fill): return c.dropna().value_counts().idxmax()
# %% ../../nbs/40_tabular.core.ipynb 81
class FillMissing(TabularProc):
    "Fill the missing values in continuous columns."
    def __init__(self, fill_strategy=FillStrategy.median, add_col=True, fill_vals=None):
        # `fill_vals` is only consulted by `FillStrategy.constant`
        if fill_vals is None: fill_vals = defaultdict(int)
        store_attr()
    def setups(self, to):
        missing = pd.isnull(to.conts).any()
        # Record one fill value per training column that actually contains NaNs
        store_attr(but='to', na_dict={n:self.fill_strategy(to[n], self.fill_vals[n])
                                      for n in missing[missing].keys()})
        # Replace the strategy function with its name (presumably for repr/serialization
        # — NOTE(review): confirm nothing calls `fill_strategy` after setup)
        self.fill_strategy = self.fill_strategy.__name__
    def encodes(self, to):
        missing = pd.isnull(to.conts)
        # A NaN in a column never seen missing during setup has no fill value: fail loudly
        for n in missing.any()[missing.any()].keys():
            assert n in self.na_dict, f"nan values in `{n}` but not in setup training set"
        for n in self.na_dict.keys():
            to[n].fillna(self.na_dict[n], inplace=True)
            if self.add_col:
                # Add a boolean `<col>_na` indicator and register it as categorical
                to.loc[:,n+'_na'] = missing[n]
                if n+'_na' not in to.cat_names: to.cat_names.append(n+'_na')
# %% ../../nbs/40_tabular.core.ipynb 91
def _maybe_expand(o): return o[:,None] if o.ndim==1 else o
# %% ../../nbs/40_tabular.core.ipynb 92
class ReadTabBatch(ItemTransform):
    "Transform `TabularPandas` values into a `Tensor` with the ability to decode"
    def __init__(self, to):
        # Keep an empty template so `decodes` can rebuild a frame with the right columns
        self.to = to.new_empty()
    def encodes(self, to):
        # Categorical codes go out as longs, continuous values as floats
        if not to.with_cont: res = (tensor(to.cats).long(),)
        else: res = (tensor(to.cats).long(),tensor(to.conts).float())
        # Targets are appended only when every y column is present (absent at inference)
        ys = [n for n in to.y_names if n in to.items.columns]
        if len(ys) == len(to.y_names): res = res + (tensor(to.targ),)
        if to.device is not None: res = to_device(res, to.device)
        return res
    def decodes(self, o):
        o = [_maybe_expand(o_) for o_ in to_np(o) if o_.size != 0]
        vals = np.concatenate(o, axis=1)
        # Without targets the batch only carries the x columns. Was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try: df = pd.DataFrame(vals, columns=self.to.all_col_names)
        except Exception: df = pd.DataFrame(vals, columns=self.to.x_names)
        return self.to.new(df)
# %% ../../nbs/40_tabular.core.ipynb 93
@typedispatch
def show_batch(x: Tabular, y, its, max_n=10, ctxs=None):
    # Tabular shows the whole decoded mini-batch at once, so `y`/`its`/`max_n`/`ctxs`
    # are unused here (the signature matches the typedispatch protocol)
    x.show()
# %% ../../nbs/40_tabular.core.ipynb 94
@delegates()
class TabDataLoader(TfmdDL):
    "A transformed `DataLoader` for Tabular data"
    def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs):
        # By default, batches are turned into tensors by `ReadTabBatch`
        if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTabBatch(dataset)
        super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs)
    # Whole batches are sliced directly out of the DataFrame by row index
    def create_batch(self, b): return self.dataset.iloc[b]
    def do_item(self, s): return 0 if s is None else s
TabularPandas._dl_type = TabDataLoader
# %% ../../nbs/40_tabular.core.ipynb 113
@EncodedMultiCategorize
def setups(self, to:Tabular):
    # Targets are already 0/1-encoded, so only the class count needs recording
    self.c = len(self.vocab)
    return self(to)
@EncodedMultiCategorize
def encodes(self, to:Tabular): return to
@EncodedMultiCategorize
def decodes(self, to:Tabular):
    # Turn the 0/1 encoding back into booleans for display
    to.transform(to.y_names, lambda c: c==1)
    return to
# %% ../../nbs/40_tabular.core.ipynb 126
@RegressionSetup
def setups(self, to:Tabular):
    # Record the number of regression targets unless it was explicitly provided
    if self.c is not None: return
    self.c = len(to.y_names)
    return to
@RegressionSetup
def encodes(self, to:Tabular): return to
@RegressionSetup
def decodes(self, to:Tabular): return to
| 16,968 | 43.075325 | 133 | py |
fastai | fastai-master/fastai/tabular/model.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/42_tabular.model.ipynb.
# %% ../../nbs/42_tabular.model.ipynb 1
from __future__ import annotations
from ..torch_basics import *
from .core import *
# %% auto 0
__all__ = ['emb_sz_rule', 'get_emb_sz', 'TabularModel', 'tabular_config']
# %% ../../nbs/42_tabular.model.ipynb 6
def emb_sz_rule(
    n_cat:int # Cardinality of a category
) -> int:
    "Rule of thumb to pick embedding size corresponding to `n_cat`"
    # Empirical power law, capped at 600 dimensions
    sz = round(1.6 * n_cat**0.56)
    return sz if sz < 600 else 600
# %% ../../nbs/42_tabular.model.ipynb 7
def _one_emb_sz(classes, n, sz_dict=None):
    "Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
    if sz_dict is None: sz_dict = {}
    n_cat = len(classes[n])
    # Explicit user-provided size wins over the rule of thumb
    return n_cat, sz_dict.get(n, int(emb_sz_rule(n_cat)))
# %% ../../nbs/42_tabular.model.ipynb 9
def get_emb_sz(
    to:Tabular|TabularPandas,
    sz_dict:dict=None # Dictionary of {'class_name' : size, ...} to override default `emb_sz_rule`
) -> list: # List of embedding sizes for each category
    "Get embedding size for each cat_name in `Tabular` or `TabularPandas`, or populate embedding size manually using sz_dict"
    res = []
    for name in to.cat_names:
        res.append(_one_emb_sz(to.classes, name, sz_dict))
    return res
# %% ../../nbs/42_tabular.model.ipynb 10
class TabularModel(Module):
    "Basic model for tabular data."
    def __init__(self,
        emb_szs:list, # Sequence of (num_embeddings, embedding_dim) for each categorical variable
        n_cont:int, # Number of continuous variables
        out_sz:int, # Number of outputs for final `LinBnDrop` layer
        layers:list, # Sequence of ints used to specify the input and output size of each `LinBnDrop` layer
        ps:float|MutableSequence=None, # Sequence of dropout probabilities for `LinBnDrop`
        embed_p:float=0., # Dropout probability for `Embedding` layer
        y_range=None, # Low and high for `SigmoidRange` activation
        use_bn:bool=True, # Use `BatchNorm1d` in `LinBnDrop` layers
        bn_final:bool=False, # Use `BatchNorm1d` on final layer
        bn_cont:bool=True, # Use `BatchNorm1d` on continuous variables
        act_cls=nn.ReLU(inplace=True), # Activation type for `LinBnDrop` layers
        lin_first:bool=True # Linear layer is first or last in `LinBnDrop` layers
    ):
        # A scalar (or missing) `ps` is broadcast to one dropout prob per hidden layer
        ps = ifnone(ps, [0]*len(layers))
        if not is_listy(ps): ps = [ps]*len(layers)
        self.embeds = nn.ModuleList([Embedding(ni, nf) for ni,nf in emb_szs])
        self.emb_drop = nn.Dropout(embed_p)
        self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
        n_emb = sum(e.embedding_dim for e in self.embeds)
        self.n_emb,self.n_cont = n_emb,n_cont
        # One LinBnDrop per hidden layer plus an output layer with no activation/dropout
        sizes = [n_emb + n_cont] + layers + [out_sz]
        actns = [act_cls for _ in range(len(sizes)-2)] + [None]
        _layers = [LinBnDrop(sizes[i], sizes[i+1], bn=use_bn and (i!=len(actns)-1 or bn_final), p=p, act=a, lin_first=lin_first)
                       for i,(p,a) in enumerate(zip(ps+[0.],actns))]
        if y_range is not None: _layers.append(SigmoidRange(*y_range))
        self.layers = nn.Sequential(*_layers)
    def forward(self, x_cat, x_cont=None):
        if self.n_emb != 0:
            # Embed each categorical column and concatenate along the feature axis
            x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
            x = torch.cat(x, 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
        return self.layers(x)
# %% ../../nbs/42_tabular.model.ipynb 13
@delegates(TabularModel.__init__)
def tabular_config(**kwargs):
    "Convenience function to easily create a config for `TabularModel`"
    # `delegates` exposes `TabularModel.__init__`'s keyword args; the config itself is just the dict
    return kwargs
| 3,750 | 45.8875 | 128 | py |
fastai | fastai-master/fastai/tabular/data.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/41_tabular.data.ipynb.
# %% ../../nbs/41_tabular.data.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
from .core import *
# %% auto 0
__all__ = ['TabularDataLoaders']
# %% ../../nbs/41_tabular.data.ipynb 7
class TabularDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for tabular data"
    @classmethod
    @delegates(Tabular.dataloaders, but=["dl_type", "dl_kwargs"])
    def from_df(cls,
        df:pd.DataFrame,
        path:str|Path='.', # Location of `df`, defaults to current working directory
        procs:list=None, # List of `TabularProc`s
        cat_names:list=None, # Column names pertaining to categorical variables
        cont_names:list=None, # Column names pertaining to continuous variables
        y_names:list=None, # Names of the dependent variables
        y_block:TransformBlock=None, # `TransformBlock` to use for the target(s)
        valid_idx:list=None, # List of indices to use for the validation set, defaults to a random split
        **kwargs
    ):
        "Create `TabularDataLoaders` from `df` in `path` using `procs`"
        if cat_names is None: cat_names = []
        # Any non-categorical, non-target column is assumed continuous
        if cont_names is None: cont_names = list(set(df)-set(L(cat_names))-set(L(y_names)))
        splits = RandomSplitter()(df) if valid_idx is None else IndexSplitter(valid_idx)(df)
        to = TabularPandas(df, procs, cat_names, cont_names, y_names, splits=splits, y_block=y_block)
        return to.dataloaders(path=path, **kwargs)
    @classmethod
    def from_csv(cls,
        csv:str|Path|io.BufferedReader, # A csv of training data
        skipinitialspace:bool=True, # Skip spaces after delimiter
        **kwargs
    ):
        "Create `TabularDataLoaders` from `csv` file in `path` using `procs`"
        return cls.from_df(pd.read_csv(csv, skipinitialspace=skipinitialspace), **kwargs)
    @delegates(TabDataLoader.__init__)
    def test_dl(self,
        test_items, # Items to create new test `TabDataLoader` formatted the same as the training data
        rm_type_tfms=None, # Number of `Transform`s to be removed from `procs`
        process:bool=True, # Apply validation `TabularProc`s to `test_items` immediately
        inplace:bool=False, # Keep separate copy of original `test_items` in memory if `False`
        **kwargs
    ):
        "Create test `TabDataLoader` from `test_items` using validation `procs`"
        # Reuse the training procs/column config, then build the dl from the valid one
        to = self.train_ds.new(test_items, inplace=inplace)
        if process: to.process()
        return self.valid.new(to, **kwargs)
Tabular._dbunch_type = TabularDataLoaders
# `from_csv` delegates its remaining kwargs to `from_df`
TabularDataLoaders.from_csv = delegates(to=TabularDataLoaders.from_df)(TabularDataLoaders.from_csv)
| 2,768 | 45.932203 | 104 | py |
fastai | fastai-master/fastai/medical/imaging.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/60_medical.imaging.ipynb.
# %% ../../nbs/60_medical.imaging.ipynb 4
from __future__ import annotations
from ..basics import *
from ..vision.all import *
from ..data.transforms import *
import pydicom,kornia,skimage
from pydicom.dataset import Dataset as DcmDataset
from pydicom.tag import BaseTag as DcmTag
from pydicom.multival import MultiValue as DcmMultiValue
from PIL import Image
try:
import cv2
cv2.setNumThreads(0)
except: pass
# %% auto 0
__all__ = ['dicom_windows', 'get_dicom_files', 'TensorDicom', 'PILDicom', 'array_freqhist_bins', 'TensorCTScan', 'PILCTScan',
'uniform_blur2d', 'gauss_blur2d', 'mask2bbox', 'crop_resize', 'DicomSegmentationDataLoaders', 'DcmDataset',
'DcmTag', 'DcmMultiValue', 'dcmread']
# %% ../../nbs/60_medical.imaging.ipynb 7
_all_ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'get_dicom_files', 'DicomSegmentationDataLoaders']
# %% ../../nbs/60_medical.imaging.ipynb 9
def get_dicom_files(path, recurse=True, folders=None):
    "Get dicom files in `path` recursively, only in `folders`, if specified."
    # Both common DICOM extensions are accepted
    dicom_extensions = [".dcm", ".dicom"]
    return get_files(path, extensions=dicom_extensions, recurse=recurse, folders=folders)
# %% ../../nbs/60_medical.imaging.ipynb 10
@patch
def dcmread(fn:Path, force = False):
    "Open a `DICOM` file"
    # Thin wrapper: `force` is forwarded positionally to `pydicom.dcmread`
    return pydicom.dcmread(str(fn), force)
# %% ../../nbs/60_medical.imaging.ipynb 14
class TensorDicom(TensorImage):
    "Inherits from `TensorImage` and converts the `pixel_array` into a `TensorDicom`"
    _show_args = {'cmap':'gray'}  # render in grayscale by default
# %% ../../nbs/60_medical.imaging.ipynb 15
class PILDicom(PILBase):
    _open_args,_tensor_cls,_show_args = {},TensorDicom,TensorDicom._show_args
    @classmethod
    def create(cls, fn:Path|str|bytes, mode=None)->None:
        "Open a `DICOM file` from path `fn` or bytes `fn` and load it as a `PIL Image`"
        # NOTE(review): any other `fn` type leaves `im` unbound and raises UnboundLocalError
        if isinstance(fn,bytes): im = Image.fromarray(pydicom.dcmread(pydicom.filebase.DicomBytesIO(fn)).pixel_array)
        if isinstance(fn,(Path,str)): im = Image.fromarray(pydicom.dcmread(fn).pixel_array)
        im.load()
        # Re-wrap the underlying image data to detach from the source file handle
        im = im._new(im.im)
        return cls(im.convert(mode) if mode else im)
PILDicom._tensor_cls = TensorDicom
# %% ../../nbs/60_medical.imaging.ipynb 16
# Read the PNG at this path into a `uint16` numpy array (for 16-bit medical PNGs)
@patch
def png16read(self:Path): return array(Image.open(self), dtype=np.uint16)
# %% ../../nbs/60_medical.imaging.ipynb 17
@patch(as_prop=True)
def pixels(self:DcmDataset):
    "`pixel_array` as a tensor"
    # Cast to float32 before wrapping so downstream arithmetic is floating point
    return tensor(self.pixel_array.astype(np.float32))
# %% ../../nbs/60_medical.imaging.ipynb 19
@patch(as_prop=True)
def scaled_px(self:DcmDataset):
    "`pixels` scaled by `RescaleSlope` and `RescaleIntercept`"
    img = self.pixels
    # Apply the rescale only when BOTH attributes exist. The previous check
    # (`hasattr(...) is not None`) was always True, so a file with a slope but no
    # intercept raised AttributeError instead of returning the raw pixels.
    if hasattr(self, 'RescaleSlope') and hasattr(self, 'RescaleIntercept'):
        return img * self.RescaleSlope + self.RescaleIntercept
    return img
# %% ../../nbs/60_medical.imaging.ipynb 25
def array_freqhist_bins(self, n_bins=100):
    "A numpy based function to split the range of pixel values into groups, such that each group has around the same number of pixels"
    imsd = np.sort(self.flatten())
    # Quantile positions: extremes at 0.1%/99.9% plus `n_bins` evenly spaced mid-points
    t = np.array([0.001])
    t = np.append(t, np.arange(n_bins)/n_bins+(1/2/n_bins))
    t = np.append(t, 0.999)
    # For small inputs (roughly <= 500 elements) the rounded top index could reach
    # len(imsd) and raise IndexError; clamp into range (no effect on larger arrays)
    t = np.clip((len(imsd)*t+0.5).astype(int), 0, len(imsd)-1)
    return np.unique(imsd[t])
# %% ../../nbs/60_medical.imaging.ipynb 26
@patch
def freqhist_bins(self:Tensor, n_bins=100):
    "A function to split the range of pixel values into groups, such that each group has around the same number of pixels"
    imsd = self.view(-1).sort()[0]
    # Quantile positions: extremes at 0.1%/99.9% plus `n_bins` evenly spaced mid-points
    t = torch.cat([tensor([0.001]),
                   torch.arange(n_bins).float()/n_bins+(1/2/n_bins),
                   tensor([0.999])])
    # Convert quantiles to indices into the sorted values, dropping duplicates
    t = (len(imsd)*t).long()
    return imsd[t].unique()
# %% ../../nbs/60_medical.imaging.ipynb 33
@patch
def hist_scaled_pt(self:Tensor, brks=None):
    "Pure-PyTorch variant of `hist_scaled`, mapping pixels onto [0,1] via `brks` breakpoints"
    # Pytorch-only version - switch to this if/when interp_1d can be optimized
    if brks is None: brks = self.freqhist_bins()
    brks = brks.to(self.device)
    # Each breakpoint maps to its evenly spaced position in [0,1]
    ys = torch.linspace(0., 1., len(brks)).to(self.device)
    return self.flatten().interp_1d(brks, ys).reshape(self.shape).clamp(0.,1.)
# %% ../../nbs/60_medical.imaging.ipynb 34
@patch
def hist_scaled(self:Tensor, brks=None):
    "Scales a tensor using `freqhist_bins` to values between 0 and 1"
    # CUDA tensors take the pure-torch path since np.interp is CPU-only
    if self.device.type=='cuda': return self.hist_scaled_pt(brks)
    if brks is None: brks = self.freqhist_bins()
    ys = np.linspace(0., 1., len(brks))
    x = self.numpy().flatten()
    # Piecewise-linear map: each breakpoint goes to its evenly spaced position in [0,1]
    x = np.interp(x, brks.numpy(), ys)
    return tensor(x).reshape(self.shape).clamp(0.,1.)
# %% ../../nbs/60_medical.imaging.ipynb 39
@patch
def hist_scaled(self:DcmDataset, brks=None, min_px=None, max_px=None):
    "Pixels scaled to a `min_px` and `max_px` value"
    px = self.scaled_px
    # Clamp outliers before histogram scaling so they don't dominate the bins
    if min_px is not None: px[px<min_px] = min_px
    if max_px is not None: px[px>max_px] = max_px
    return px.hist_scaled(brks=brks)
# %% ../../nbs/60_medical.imaging.ipynb 43
@patch
def windowed(self:Tensor, w, l):
    "Scale pixel intensity by window width and window level"
    px = self.clone()
    # The window spans [l - w//2, l + w//2]; values outside are clamped, then the
    # window is linearly mapped onto [0,1]
    px_min = l - w//2
    px_max = l + w//2
    px[px<px_min] = px_min
    px[px>px_max] = px_max
    return (px-px_min) / (px_max-px_min)
# %% ../../nbs/60_medical.imaging.ipynb 44
@patch
def windowed(self:DcmDataset, w, l):
    "Apply `Tensor.windowed` to the rescaled dicom pixels"
    return self.scaled_px.windowed(w,l)
# %% ../../nbs/60_medical.imaging.ipynb 45
# From https://radiopaedia.org/articles/windowing-ct
# Standard CT windows as `(window_width, window_level)` pairs, matching the
# argument order of `windowed(w, l)` above.
dicom_windows = types.SimpleNamespace(
    brain=(80,40),
    subdural=(254,100),
    stroke=(8,32),
    brain_bone=(2800,600),
    brain_soft=(375,40),
    lungs=(1500,-600),
    mediastinum=(350,50),
    abdomen_soft=(400,50),
    liver=(150,30),
    spine_soft=(250,50),
    spine_bone=(1800,400)
)
# %% ../../nbs/60_medical.imaging.ipynb 47
class TensorCTScan(TensorImageBW):
    "Inherits from `TensorImageBW` and converts the `pixel_array` into a `TensorCTScan`"
    _show_args = {'cmap':'bone'}  # CT scans render with the 'bone' colormap by default
# %% ../../nbs/60_medical.imaging.ipynb 49
# PIL counterpart of `TensorCTScan`, mirroring the `PILDicom`/`TensorDicom` pairing
class PILCTScan(PILBase): _open_args,_tensor_cls,_show_args = {},TensorCTScan,TensorCTScan._show_args
# %% ../../nbs/60_medical.imaging.ipynb 50
@patch
@delegates(show_image)
def show(self:DcmDataset, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
    "Display a normalized dicom image by default"
    # `scale` may be: a (w,l) window tuple, explicit breakpoints (ndarray/Tensor),
    # True (freq-hist scaling) or False (raw rescaled pixels)
    px = (self.windowed(*scale) if isinstance(scale,tuple)
          else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
          else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
          else self.scaled_px)
    show_image(px, cmap=cmap, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 54
# NOTE(review): this second patch replaces the `show` defined just above
@patch
def show(self:DcmDataset, frames=1, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
    "Adds functionality to view dicom images where each file may have more than 1 frame"
    px = (self.windowed(*scale) if isinstance(scale,tuple)
          else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
          else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
          else self.scaled_px)
    if px.ndim > 2:
        # Multi-frame file: show the first `frames` frames side by side
        gh=[]
        p = px.shape; print(f'{p[0]} frames per file')
        for i in range(frames): u = px[i]; gh.append(u)
        show_images(gh, **kwargs)
    else: show_image(px, cmap=cmap, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 56
@patch
def pct_in_window(dcm:DcmDataset, w, l):
    "% of pixels in the window `(w,l)`"
    px = dcm.scaled_px
    # Fraction of pixels strictly inside (l - w//2, l + w//2)
    return ((px > l-w//2) & (px < l+w//2)).float().mean().item()
# %% ../../nbs/60_medical.imaging.ipynb 59
def uniform_blur2d(x,s):
    "Uniformly apply blurring"
    # Box filter of width `s`, normalized so the kernel sums to 1
    w = x.new_ones(1,1,1,s)/s
    # Factor 2d conv into 2 1d convs
    x = unsqueeze(x, dim=0, n=4-x.dim())
    r = (F.conv2d(x, w, padding=s//2))
    # NOTE(review): `.cpu()` moves the result to CPU even for CUDA inputs — confirm intended
    r = (F.conv2d(r, w.transpose(-1,-2), padding=s//2)).cpu()[:,0]
    return r.squeeze()
# %% ../../nbs/60_medical.imaging.ipynb 61
def gauss_blur2d(x,s):
    "Apply gaussian_blur2d kornia filter"
    # Kernel size must be odd; derive it from the requested sigma `s`
    s2 = int(s/4)*2+1
    x2 = unsqueeze(x, dim=0, n=4-x.dim())
    res = kornia.filters.gaussian_blur2d(x2, (s2,s2), (s,s), 'replicate')
    return res.squeeze()
# %% ../../nbs/60_medical.imaging.ipynb 64
@patch
def mask_from_blur(x:Tensor, window, sigma=0.3, thresh=0.05, remove_max=True):
    "Create a mask from the blurred image"
    p = x.windowed(*window)
    # Drop fully saturated pixels before blurring so they don't dominate the mask
    if remove_max: p[p==1] = 0
    # Sigma scales with image width; the blurred map is thresholded into a boolean mask
    return gauss_blur2d(p, s=sigma*x.shape[-1])>thresh
# %% ../../nbs/60_medical.imaging.ipynb 65
@patch
def mask_from_blur(x:DcmDataset, window, sigma=0.3, thresh=0.05, remove_max=True):
    "Create a mask from the blurred image"
    return to_device(x.scaled_px).mask_from_blur(window, sigma, thresh, remove_max=remove_max)
# %% ../../nbs/60_medical.imaging.ipynb 67
def _px_bounds(x, dim):
    "Per-image [min,max] index of nonzero mass along the axis remaining after summing `dim`"
    # Collapse `dim`, then collect (image_index, position) pairs of nonzero entries
    c = x.sum(dim).nonzero().cpu()
    idxs,vals = torch.unique(c[:,0],return_counts=True)
    vs = torch.split_with_sizes(c[:,1],tuple(vals))
    d = {k.item():v for k,v in zip(idxs,vs)}
    # Images with no nonzero pixels fall back to the full extent of the last axis
    default_u = tensor([0,x.shape[-1]-1])
    b = [d.get(o,default_u) for o in range(x.shape[0])]
    b = [tensor([o.min(),o.max()]) for o in b]
    return torch.stack(b)
# %% ../../nbs/60_medical.imaging.ipynb 68
def mask2bbox(mask):
    "Bounding box of the non-zero region of `mask`, per batch item"
    # Accept a single 2d mask by faking a batch dimension.
    single = mask.dim()==2
    if single: mask = mask[None]
    cols = _px_bounds(mask,-1).t()
    rows = _px_bounds(mask,-2).t()
    boxes = torch.stack([cols,rows],dim=1).to(mask.device)
    return boxes[...,0] if single else boxes
# %% ../../nbs/60_medical.imaging.ipynb 70
def _bbs2sizes(crops, init_sz, use_square=True):
bb = crops.flip(1)
szs = (bb[1]-bb[0])
if use_square: szs = szs.max(0)[0][None].repeat((2,1))
overs = (szs+bb[0])>init_sz
bb[0][overs] = init_sz-szs[overs]
lows = (bb[0]/float(init_sz))
return lows,szs/float(init_sz)
# %% ../../nbs/60_medical.imaging.ipynb 71
def crop_resize(x, crops, new_sz):
    # NB assumes square inputs. Not tested for non-square anythings!
    bs = x.shape[0]
    lows,szs = _bbs2sizes(crops, x.shape[-1])
    if not isinstance(new_sz,(list,tuple)): new_sz = (new_sz,new_sz)
    # Identity affine per batch item; the crop is realised by rescaling the grid.
    eye = tensor([[1.,0,0],[0,1,0]])[None].repeat((bs,1,1)).to(x.device)
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=UserWarning)
        base = F.affine_grid(eye, (bs,1,*new_sz))+1.
        scale = unsqueeze(szs.t(),1,n=2)
        shift = unsqueeze(lows.t()*2.,1,n=2)
        return F.grid_sample(x.unsqueeze(1), base*scale+shift-1)
# %% ../../nbs/60_medical.imaging.ipynb 75
@patch
def to_nchan(x:Tensor, wins, bins=None):
    "Stack one channel per window in `wins`, plus a hist-scaled channel unless `bins==0`"
    chans = [x.windowed(*w) for w in wins]
    # `bins=0` explicitly disables the histogram-scaled channel; any other value
    # (including None) appends it.
    if not (isinstance(bins,int) and bins==0): chans.append(x.hist_scaled(bins).clamp(0,1))
    stack_dim = 1 if x.dim()==3 else 0
    return TensorCTScan(torch.stack(chans, dim=stack_dim))
# %% ../../nbs/60_medical.imaging.ipynb 76
@patch
def to_nchan(x:DcmDataset, wins, bins=None):
    "Delegate to the `Tensor` version on this dicom's scaled pixels"
    px = x.scaled_px
    return px.to_nchan(wins, bins)
# %% ../../nbs/60_medical.imaging.ipynb 80
@patch
def to_3chan(x:Tensor, win1, win2, bins=None):
    "Two windowed channels plus the hist-scaled one"
    wins = [win1,win2]
    return x.to_nchan(wins, bins=bins)
# %% ../../nbs/60_medical.imaging.ipynb 81
@patch
def to_3chan(x:DcmDataset, win1, win2, bins=None):
    "Delegate to the `Tensor` version on this dicom's scaled pixels"
    px = x.scaled_px
    return px.to_3chan(win1, win2, bins)
# %% ../../nbs/60_medical.imaging.ipynb 83
@patch
def save_jpg(x:Tensor|DcmDataset, path, wins, bins=None, quality=90):
    "Save tensor or dicom image into `jpg` format"
    fn = Path(path).with_suffix('.jpg')
    px = (x.to_nchan(wins, bins)*255).byte()
    # 4 channels are saved as CMYK; otherwise assume RGB.
    mode = 'CMYK' if px.shape[0]==4 else 'RGB'
    Image.fromarray(px.permute(1,2,0).numpy(), mode=mode).save(fn, quality=quality)
# %% ../../nbs/60_medical.imaging.ipynb 84
@patch
def to_uint16(x:Tensor|DcmDataset, bins=None):
    "Convert into a uint16 array"
    # Scale by the max representable value (2**16-1), not 2**16: a pixel of
    # exactly 1.0 would otherwise become 65536 and wrap around to 0 in uint16.
    d = x.hist_scaled(bins).clamp(0,1) * (2**16-1)
    return d.numpy().astype(np.uint16)
# %% ../../nbs/60_medical.imaging.ipynb 85
@patch
def save_tif16(x:Tensor|DcmDataset, path, bins=None, compress=True):
    "Save tensor or dicom image into `tiff` format"
    fn = Path(path).with_suffix('.tif')
    comp = 'tiff_deflate' if compress else None
    Image.fromarray(x.to_uint16(bins)).save(str(fn), compression=comp)
# %% ../../nbs/60_medical.imaging.ipynb 87
@patch
def set_pixels(self:DcmDataset, px):
    "Write `px` back into this dataset's raw `PixelData` and update its stored dimensions"
    self.PixelData = px.tobytes()
    self.Rows,self.Columns = px.shape
# Make `pixel_array` writable: keep pydicom's original getter, add `set_pixels` as setter.
DcmDataset.pixel_array = property(DcmDataset.pixel_array.fget, set_pixels)
# %% ../../nbs/60_medical.imaging.ipynb 88
@patch
def zoom(self:DcmDataset, ratio):
    "Zoom image by specified ratio"
    # scipy's zoom warns about interpolation details; silence its UserWarnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        resized = ndimage.zoom(self.pixel_array, ratio)
    self.set_pixels(resized)
# %% ../../nbs/60_medical.imaging.ipynb 92
@patch
def zoom_to(self:DcmDataset, sz):
    "Change image size to specified pixel size"
    # A bare number means a square target size.
    if not isinstance(sz,(list,tuple)): sz = (sz,sz)
    ratios = (sz[0]/self.Rows, sz[1]/self.Columns)
    self.zoom(ratios)
# %% ../../nbs/60_medical.imaging.ipynb 94
# Mirrors the numpy `.shape` convention of (rows, cols).
@patch(as_prop=True)
def shape(self:DcmDataset):
    "Returns the shape of a dicom image as rows and columns"
    return self.Rows,self.Columns
# %% ../../nbs/60_medical.imaging.ipynb 97
def _cast_dicom_special(x):
cls = type(x)
if not cls.__module__.startswith('pydicom'): return x
if cls.__base__ == object: return x
return cls.__base__(x)
def _split_elem(vals):
    "Flatten dicom elements into a dict, expanding multi-valued elements into numbered keys"
    out = dict()
    for elem in vals:
        key, val = elem.keyword, elem.value
        if isinstance(val,DcmMultiValue):
            # Flag the element as multi-valued and emit Key, Key1, Key2, ...
            out[f'Multi{key}'] = 1
            for i,item in enumerate(val): out[f'{key}{"" if i==0 else i}'] = item
        else:
            out[key] = val
    return {k: _cast_dicom_special(v) for k, v in out.items()}
# %% ../../nbs/60_medical.imaging.ipynb 98
@patch
def as_dict(self:DcmDataset, px_summ=True, window=dicom_windows.brain):
    "Convert the header of a dicom into a dictionary"
    # (0x7fe0,0x0010) is the PixelData tag; exclude the raw pixels from the header dump.
    pxdata = (0x7fe0,0x0010)
    vals = [self[o] for o in self.keys() if o != pxdata]
    res = _split_elem(vals)
    res['fname'] = self.filename
    if not px_summ: return res
    # Optionally add summary statistics computed from the pixel data.
    stats = 'min','max','mean','std'
    try:
        pxs = self.pixel_array
        for f in stats: res['img_'+f] = getattr(pxs,f)()
        res['img_pct_window'] = self.pct_in_window(*window)
    except Exception as e:
        # Unreadable/corrupt pixel data: fall back to zeroed stats and report the error.
        for f in stats: res['img_'+f] = 0
        print(res,e)
    return res
# %% ../../nbs/60_medical.imaging.ipynb 101
def _dcm2dict(fn, window=dicom_windows.brain, px_summ=True, **kwargs):
    "Read the dicom at `fn` and return its header as a dict"
    dcm = fn.dcmread()
    return dcm.as_dict(window=window, px_summ=px_summ, **kwargs)
# %% ../../nbs/60_medical.imaging.ipynb 102
@delegates(parallel)
def _from_dicoms(cls, fns, n_workers=0, **kwargs):
    "Build a `DataFrame` from dicom files, reading headers in parallel"
    return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs))
# Expose as an alternate constructor: `pd.DataFrame.from_dicoms(fns)`.
pd.DataFrame.from_dicoms = classmethod(_from_dicoms)
# %% ../../nbs/60_medical.imaging.ipynb 105
class DicomSegmentationDataLoaders(DataLoaders):
    "Basic wrapper around DICOM `DataLoaders` with factory methods for segmentation problems"
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
        "Create from list of `fnames` in `path`s with `label_func`."
        # DICOM images in, segmentation masks out; masks are located via `label_func`.
        blocks = (ImageBlock(cls=PILDicom), MaskBlock(codes=codes))
        dblock = DataBlock(blocks=blocks,
                           splitter=RandomSplitter(valid_pct, seed=seed),
                           get_y=label_func,
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms)
        return cls.from_dblock(dblock, fnames, path=path, **kwargs)
| 15,588 | 36.92944 | 136 | py |
fastai | fastai-master/fastai/data/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/03_data.core.ipynb.
# %% ../../nbs/03_data.core.ipynb 3
from __future__ import annotations
from ..torch_basics import *
from .load import *
# %% auto 0
__all__ = ['show_batch', 'show_results', 'TfmdDL', 'DataLoaders', 'FilteredBase', 'TfmdLists', 'decode_at', 'show_at', 'Datasets',
'test_set']
# %% ../../nbs/03_data.core.ipynb 8
@typedispatch
def show_batch(
    x, # Input(s) in the batch
    y, # Target(s) in the batch
    samples, # List of (`x`, `y`) pairs of length `max_n`
    ctxs=None, # List of `ctx` objects to show data. Could be a matplotlib axis, DataFrame, etc.
    max_n=9, # Maximum number of `samples` to show
    **kwargs
):
    "Show `max_n` input(s) and target(s) from the batch."
    # Fallback typedispatch implementation; type-specific overloads override it.
    # `Inf.nones` yields None forever, so the zips below are bounded by `range(max_n)`.
    if ctxs is None: ctxs = Inf.nones
    if hasattr(samples[0], 'show'):
        ctxs = [s.show(ctx=c, **kwargs) for s,c,_ in zip(samples,ctxs,range(max_n))]
    else:
        # Samples are tuples: show each tuple element in turn on the same ctxs.
        for i in range_of(samples[0]):
            ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    return ctxs
# %% ../../nbs/03_data.core.ipynb 10
@typedispatch
def show_results(
    x, # Input(s) in the batch
    y, # Target(s) in the batch
    samples, # List of (`x`, `y`) pairs of length `max_n`
    outs, # List of predicted output(s) from the model
    ctxs=None, # List of `ctx` objects to show data. Could be a matplotlib axis, DataFrame, etc.
    max_n=9, # Maximum number of `samples` to show
    **kwargs
):
    "Show `max_n` results with input(s), target(s) and prediction(s)."
    if ctxs is None: ctxs = Inf.nones
    # Draw the ground-truth sample elements first, then the predictions on the same ctxs.
    for i in range(len(samples[0])):
        ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    for i in range(len(outs[0])):
        ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
    return ctxs
# %% ../../nbs/03_data.core.ipynb 12
# Force re-export of the typedispatch stubs defined above.
_all_ = ["show_batch", "show_results"]
# %% ../../nbs/03_data.core.ipynb 13
# Names of the three transform pipelines every `TfmdDL` carries.
_batch_tfms = ('after_item','before_batch','after_batch')
# %% ../../nbs/03_data.core.ipynb 14
@delegates()
class TfmdDL(DataLoader):
    "Transformed `DataLoader`"
    def __init__(self,
        dataset, # Map- or iterable-style dataset from which to load the data
        bs:int=64, # Size of batch
        shuffle:bool=False, # Whether to shuffle data
        num_workers:int=None, # Number of CPU cores to use in parallel (default: All available up to 16)
        verbose:bool=False, # Whether to print verbose logs
        do_setup:bool=True, # Whether to run `setup()` for batch transform(s)
        **kwargs
    ):
        if num_workers is None: num_workers = min(16, defaults.cpus)
        # Wrap each of the three tfm events (after_item/before_batch/after_batch) in a `Pipeline`.
        for nm in _batch_tfms: kwargs[nm] = Pipeline(kwargs.get(nm,None))
        super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
        if do_setup:
            for nm in _batch_tfms:
                pv(f"Setting up {nm}: {kwargs[nm]}", verbose)
                kwargs[nm].setup(self)
    def _one_pass(self):
        # Run a single item through the pipelines to discover the batch types and `n_inp`.
        b = self.do_batch([self.do_item(None)])
        if self.device is not None: b = to_device(b, self.device)
        its = self.after_batch(b)
        self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
        self._types = explode_types(its)
    def _retain_dl(self,b):
        # Re-apply the recorded subclass types (lost by collation) to batch `b`.
        if not getattr(self, '_types', None): self._one_pass()
        return retain_types(b, typs=self._types)
    @delegates(DataLoader.new)
    def new(self,
        dataset=None, # Map- or iterable-style dataset from which to load the data
        cls=None, # Class of the newly created `DataLoader` object
        **kwargs
    ):
        res = super().new(dataset, cls, do_setup=False, **kwargs)
        # Propagate the cached type info so the new loader need not do its own pass.
        if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):
            try:
                self._one_pass()
                res._n_inp,res._types = self._n_inp,self._types
            except Exception as e:
                print("Could not do one pass in your dataloader, there is something wrong in it. Please see the stack trace below:")
                raise
        else: res._n_inp,res._types = self._n_inp,self._types
        return res
    def before_iter(self):
        super().before_iter()
        # Push the dataset's split_idx (train/valid) into each pipeline so
        # split-aware transforms (e.g. augmentation) know whether to run.
        split_idx = getattr(self.dataset, 'split_idx', None)
        for nm in _batch_tfms:
            f = getattr(self,nm)
            if isinstance(f,Pipeline): f.split_idx=split_idx
    def decode(self,
        b # Batch to decode
    ):
        return to_cpu(self.after_batch.decode(self._retain_dl(b)))
    def decode_batch(self,
        b, # Batch to decode
        max_n:int=9, # Maximum number of items to decode
        full:bool=True # Whether to decode all transforms. If `False`, decode up to the point the item knows how to show itself
    ):
        return self._decode_batch(self.decode(b), max_n, full)
    def _decode_batch(self, b, max_n=9, full=True):
        # Undo before_batch, after_item and dataset tfms per sample, in reverse order.
        f = self.after_item.decode
        f1 = self.before_batch.decode
        f = compose(f1, f, partial(getcallable(self.dataset,'decode'), full = full))
        return L(batch_to_samples(b, max_n=max_n)).map(f)
    def _pre_show_batch(self, b, max_n=9):
        "Decode `b` to be ready for `show_batch`"
        b = self.decode(b)
        if hasattr(b, 'show'): return b,None,None
        its = self._decode_batch(b, max_n, full=False)
        if not is_listy(b): b,its = [b],L((o,) for o in its)
        return detuplify(b[:self.n_inp]),detuplify(b[self.n_inp:]),its
    def show_batch(self,
        b=None, # Batch to show
        max_n:int=9, # Maximum number of items to show
        ctxs=None, # List of `ctx` objects to show data. Could be matplotlib axis, DataFrame etc
        show:bool=True, # Whether to display data
        unique:bool=False, # Whether to show only one
        **kwargs
    ):
        "Show `max_n` input(s) and target(s) from the batch."
        # `unique` temporarily forces every index to 0 so the batch repeats one item.
        if unique:
            old_get_idxs = self.get_idxs
            self.get_idxs = lambda: Inf.zeros
        if b is None: b = self.one_batch()
        if not show: return self._pre_show_batch(b, max_n=max_n)
        show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
        if unique: self.get_idxs = old_get_idxs
    def show_results(self,
        b, # Batch to show results for
        out, # Predicted output from model for the batch
        max_n:int=9, # Maximum number of items to show
        ctxs=None, # List of `ctx` objects to show data. Could be matplotlib axis, DataFrame etc
        show:bool=True, # Whether to display data
        **kwargs
    ):
        "Show `max_n` results with input(s), target(s) and prediction(s)."
        x,y,its = self.show_batch(b, max_n=max_n, show=False)
        # Build a pseudo-batch whose targets are the predictions, to decode them identically.
        b_out = type(b)(b[:self.n_inp] + (tuple(out) if is_listy(out) else (out,)))
        x1,y1,outs = self.show_batch(b_out, max_n=max_n, show=False)
        res = (x,x1,None,None) if its is None else (x, y, its, outs.itemgot(slice(self.n_inp,None)))
        if not show: return res
        show_results(*res, ctxs=ctxs, max_n=max_n, **kwargs)
    @property
    def n_inp(self) -> int:
        "Number of elements in `Datasets` or `TfmdDL` tuple to be considered part of input."
        if hasattr(self.dataset, 'n_inp'): return self.dataset.n_inp
        if not hasattr(self, '_n_inp'): self._one_pass()
        return self._n_inp
    def to(self,
        device # Device to put `DataLoader` and transforms
    ):
        self.device = device
        for tfm in self.after_batch.fs:
            # Check that tfm.to is callable as TabularPandas & transforms set tfm.to as an object
            if hasattr(tfm, 'to') and callable(tfm.to): tfm.to(device)
            else:
                for a in L(getattr(tfm, 'parameters', None)): setattr(tfm, a, getattr(tfm, a).to(device))
        return self
# %% ../../nbs/03_data.core.ipynb 16
# Attach docstrings to `TfmdDL` methods defined without inline docstrings.
add_docs(TfmdDL,
         decode="Decode `b` using `tfms`",
         decode_batch="Decode `b` entirely",
         new="Create a new version of self with a few changed attributes",
         show_batch="Show `b` (defaults to `one_batch`), a list of lists of pipeline outputs (i.e. output of a `DataLoader`)",
         show_results="Show each item of `b` and `out`",
         before_iter="override",
         to="Put self and its transforms state on `device`")
# %% ../../nbs/03_data.core.ipynb 34
@docs
class DataLoaders(GetAttr):
    "Basic wrapper around several `DataLoader`s."
    # Unknown attribute lookups are delegated to the training `DataLoader`.
    _default='train'
    def __init__(self,
        *loaders, # `DataLoader` objects to wrap
        path:str|Path='.', # Path to store export objects
        device=None # Device to put `DataLoaders`
    ):
        self.loaders,self.path = list(loaders),Path(path)
        if device is not None and (loaders!=() and hasattr(loaders[0],'to')): self.device = device
    def __getitem__(self, i): return self.loaders[i]
    def __len__(self): return len(self.loaders)
    def new_empty(self):
        loaders = [dl.new(dl.dataset.new_empty()) for dl in self.loaders]
        return type(self)(*loaders, path=self.path, device=self.device)
    def _set(i, self, v): self.loaders[i] = v
    # By convention loader 0 is train and loader 1 is valid.
    train ,valid = add_props(lambda i,x: x[i], _set)
    train_ds,valid_ds = add_props(lambda i,x: x[i].dataset)
    @property
    def device(self): return self._device
    @device.setter
    def device(self,
        d # Device to put `DataLoaders`
    ):
        # Moving the `DataLoaders` moves every wrapped loader (and its transforms).
        for dl in self.loaders: dl.to(d)
        self._device = d
    def to(self,
        device # Device to put `DataLoaders`
    ):
        self.device = device
        return self
    def _add_tfms(self, tfms, event, dl_idx):
        "Adds `tfms` to `event` on `dl`"
        if(isinstance(dl_idx,str)): dl_idx = 0 if(dl_idx=='train') else 1
        dl_tfms = getattr(self[dl_idx], event)
        apply(dl_tfms.add, tfms)
    def add_tfms(self,
        tfms, # List of `Transform`(s) or `Pipeline` to apply
        event, # When to run `Transform`. Events mentioned in `TfmdDL`
        loaders=None # List of `DataLoader` objects to add `tfms` to
    ):
        "Adds `tfms` to `events` on `loaders`"
        if(loaders is None): loaders=range(len(self.loaders))
        if not is_listy(loaders): loaders = listify(loaders)
        for loader in loaders:
            self._add_tfms(tfms,event,loader)
    def cuda(self): return self.to(device=default_device())
    def cpu(self): return self.to(device=torch.device('cpu'))
    @classmethod
    def from_dsets(cls,
        *ds, # `Datasets` object(s)
        path:str|Path='.', # Path to put in `DataLoaders`
        bs:int=64, # Size of batch
        device=None, # Device to put `DataLoaders`
        dl_type=TfmdDL, # Type of `DataLoader`
        **kwargs
    ):
        # Only the first (train) loader shuffles and drops the last partial batch.
        default = (True,) + (False,) * (len(ds)-1)
        defaults = {'shuffle': default, 'drop_last': default}
        # Batch-tfm kwargs get one fresh `Pipeline` per dataset; others are tuplified per dataset.
        tfms = {k:tuple(Pipeline(kwargs[k]) for i in range_of(ds)) for k in _batch_tfms if k in kwargs}
        kwargs = merge(defaults, {k: tuplify(v, match=ds) for k,v in kwargs.items() if k not in _batch_tfms}, tfms)
        kwargs = [{k: v[i] for k,v in kwargs.items()} for i in range_of(ds)]
        return cls(*[dl_type(d, bs=bs, **k) for d,k in zip(ds, kwargs)], path=path, device=device)
    @classmethod
    def from_dblock(cls,
        dblock, # `DataBlock` object
        source, # Source of data. Can be `Path` to files
        path:str|Path='.', # Path to put in `DataLoaders`
        bs:int=64, # Size of batch
        val_bs:int=None, # Size of batch for validation `DataLoader`
        shuffle:bool=True, # Whether to shuffle data
        device=None, # Device to put `DataLoaders`
        **kwargs
    ):
        return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle=shuffle, device=device, **kwargs)
    _docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)",
               train="Training `DataLoader`",
               valid="Validation `DataLoader`",
               train_ds="Training `Dataset`",
               valid_ds="Validation `Dataset`",
               to="Use `device`",
               add_tfms="Add `tfms` to `loaders` for `event",
               cuda="Use accelerator if available",
               cpu="Use the cpu",
               new_empty="Create a new empty version of `self` with the same transforms",
               from_dblock="Create a dataloaders from a given `dblock`")
# %% ../../nbs/03_data.core.ipynb 50
class FilteredBase:
    "Base class for lists with subsets"
    _dl_type,_dbunch_type = TfmdDL,DataLoaders
    def __init__(self, *args, dl_type=None, **kwargs):
        if dl_type is not None: self._dl_type = dl_type
        # Re-wrap `dataloaders` so its signature advertises the dl type's kwargs.
        self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
        super().__init__(*args, **kwargs)
    @property
    def n_subsets(self): return len(self.splits)
    def _new(self, items, **kwargs): return super()._new(items, splits=self.splits, **kwargs)
    # Fixed: `raise NotImplemented` raised a TypeError ("exceptions must derive from
    # BaseException") because `NotImplemented` is a sentinel value, not an exception.
    def subset(self): raise NotImplementedError
    def dataloaders(self,
        bs:int=64, # Batch size
        shuffle_train:bool=None, # (Deprecated, use `shuffle`) Shuffle training `DataLoader`
        shuffle:bool=True, # Shuffle training `DataLoader`
        val_shuffle:bool=False, # Shuffle validation `DataLoader`
        n:int=None, # Size of `Datasets` used to create `DataLoader`
        path:str|Path='.', # Path to put in `DataLoaders`
        dl_type:TfmdDL=None, # Type of `DataLoader`
        dl_kwargs:list=None, # List of kwargs to pass to individual `DataLoader`s
        device:torch.device=None, # Device to put `DataLoaders`
        drop_last:bool=None, # Drop last incomplete batch, defaults to `shuffle`
        val_bs:int=None, # Validation batch size, defaults to `bs`
        **kwargs
    ) -> DataLoaders:
        "Build a `DataLoaders` with one `DataLoader` per subset (train first, then validation sets)"
        if shuffle_train is not None:
            shuffle=shuffle_train
            warnings.warn('`shuffle_train` is deprecated. Use `shuffle` instead.',DeprecationWarning)
        if device is None: device=default_device()
        if dl_kwargs is None: dl_kwargs = [{}] * self.n_subsets
        if dl_type is None: dl_type = self._dl_type
        if drop_last is None: drop_last = shuffle
        # kwargs prefixed with `val_` apply only to the validation loaders.
        val_kwargs={k[4:]:v for k,v in kwargs.items() if k.startswith('val_')}
        def_kwargs = {'bs':bs,'shuffle':shuffle,'drop_last':drop_last,'n':n,'device':device}
        dl = dl_type(self.subset(0), **merge(kwargs,def_kwargs, dl_kwargs[0]))
        # Validation loaders are derived from the train loader via `new` so they share types.
        def_kwargs = {'bs':bs if val_bs is None else val_bs,'shuffle':val_shuffle,'n':None,'drop_last':False}
        dls = [dl] + [dl.new(self.subset(i), **merge(kwargs,def_kwargs,val_kwargs,dl_kwargs[i]))
                      for i in range(1, self.n_subsets)]
        return self._dbunch_type(*dls, path=path, device=device)
FilteredBase.train,FilteredBase.valid = add_props(lambda i,x: x.subset(i))
# %% ../../nbs/03_data.core.ipynb 52
class TfmdLists(FilteredBase, L, GetAttr):
    "A `Pipeline` of `tfms` applied to a collection of `items`"
    # Unknown attribute lookups are delegated to the tfms pipeline.
    _default='tfms'
    def __init__(self,
        items:list, # Items to apply `Transform`s to
        tfms:MutableSequence|Pipeline, # `Transform`(s) or `Pipeline` to apply
        use_list:bool=None, # Use `list` in `L`
        do_setup:bool=True, # Call `setup()` for `Transform`
        split_idx:int=None, # Apply `Transform`(s) to training or validation set. `0` for training set and `1` for validation set
        train_setup:bool=True, # Apply `Transform`(s) only on training `DataLoader`
        splits:list=None, # Indices for training and validation sets
        types=None, # Types of data in `items`
        verbose:bool=False, # Print verbose output
        dl_type:TfmdDL=None # Type of `DataLoader`
    ):
        super().__init__(items, use_list=use_list)
        if dl_type is not None: self._dl_type = dl_type
        # Default to "everything is train, validation is empty" when no splits given.
        self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
        if isinstance(tfms,TfmdLists): tfms = tfms.tfms
        if isinstance(tfms,Pipeline): do_setup=False
        self.tfms = Pipeline(tfms, split_idx=split_idx)
        store_attr('types,split_idx')
        if do_setup:
            pv(f"Setting up {self.tfms}", verbose)
            self.setup(train_setup=train_setup)
    def _new(self, items, split_idx=None, **kwargs):
        split_idx = ifnone(split_idx,self.split_idx)
        try: return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, split_idx=split_idx, **kwargs)
        except IndexError as e:
            # Fixed: the message interpolated an undefined name `i`, so the handler
            # itself raised a NameError that masked the original IndexError.
            e.args = [f"Tried to grab subset {split_idx} in the Dataset, but it contained no items.\n\t{e.args[0]}"]
            raise
    def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i)
    def _after_item(self, o): return self.tfms(o)
    def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
    def __iter__(self): return (self[i] for i in range(len(self)))
    def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
    def decode(self, o, **kwargs): return self.tfms.decode(o, **kwargs)
    def __call__(self, o, **kwargs): return self.tfms.__call__(o, **kwargs)
    def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1))
    def new_empty(self): return self._new([])
    def setup(self,
        train_setup:bool=True # Apply `Transform`(s) only on training `DataLoader`
    ):
        self.tfms.setup(self, train_setup)
        if len(self) != 0:
            # Push one raw item through every tfm, recording the type before/after each.
            x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
            self.types = []
            for f in self.tfms.fs:
                self.types.append(getattr(f, 'input_types', type(x)))
                x = f(x)
            self.types.append(type(x))
        types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
        self.pretty_types = '\n'.join([f'  - {t}' for t in types])
    def infer_idx(self, x):
        # TODO: check if we really need this, or can simplify
        # Find the first tfm whose recorded input type matches `x`.
        idx = 0
        for t in self.types:
            if isinstance(x, t): break
            idx += 1
        types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
        pretty_types = '\n'.join([f'  - {t}' for t in types])
        assert idx < len(self.types), f"Expected an input of type in \n{pretty_types}\n but got {type(x)}"
        return idx
    def infer(self, x):
        return compose_tfms(x, tfms=self.tfms.fs[self.infer_idx(x):], split_idx=self.split_idx)
    def __getitem__(self, idx):
        res = super().__getitem__(idx)
        if self._after_item is None: return res
        return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
# %% ../../nbs/03_data.core.ipynb 53
# Attach docstrings to `TfmdLists` methods defined without inline docstrings.
add_docs(TfmdLists,
         setup="Transform setup with self",
         decode="From `Pipeline`",
         show="From `Pipeline`",
         overlapping_splits="All splits that are in more than one split",
         subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
         infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
         infer="Apply `self.tfms` to `x` starting at the right tfm depending on the type of `x`",
         new_empty="A new version of `self` but with no items")
# %% ../../nbs/03_data.core.ipynb 54
def decode_at(o, idx):
    "Decoded item at `idx`"
    item = o[idx]
    return o.decode(item)
# %% ../../nbs/03_data.core.ipynb 55
def show_at(o, idx, **kwargs):
    "Show item at `idx`"
    return o.show(o[idx], **kwargs)
# %% ../../nbs/03_data.core.ipynb 73
@docs
@delegates(TfmdLists)
class Datasets(FilteredBase):
    "A dataset that creates a tuple from each `tfms`"
    def __init__(self,
        items:list=None, # List of items to create `Datasets`
        tfms:MutableSequence|Pipeline=None, # List of `Transform`(s) or `Pipeline` to apply
        tls:TfmdLists=None, # If None, `self.tls` is generated from `items` and `tfms`
        n_inp:int=None, # Number of elements in `Datasets` tuple that should be considered part of input
        dl_type=None, # Default type of `DataLoader` used when function `FilteredBase.dataloaders` is called
        **kwargs
    ):
        super().__init__(dl_type=dl_type)
        # One `TfmdLists` per tfms pipeline; each produces one element of the item tuple.
        self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
        # By default everything but the last element is an input.
        self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
    def __getitem__(self, it):
        res = tuple([tl[it] for tl in self.tls])
        return res if is_indexer(it) else list(zip(*res))
    def __getattr__(self,k): return gather_attrs(self, k, 'tls')
    def __dir__(self): return super().__dir__() + gather_attr_names(self, 'tls')
    def __len__(self): return len(self.tls[0])
    def __iter__(self): return (self[i] for i in range(len(self)))
    def __repr__(self): return coll_repr(self)
    def decode(self, o, full=True): return tuple(tl.decode(o_, full=full) for o_,tl in zip(o,tuplify(self.tls, match=o)))
    def subset(self, i): return type(self)(tls=L(tl.subset(i) for tl in self.tls), n_inp=self.n_inp)
    def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs)
    def overlapping_splits(self): return self.tls[0].overlapping_splits()
    def new_empty(self): return type(self)(tls=[tl.new_empty() for tl in self.tls], n_inp=self.n_inp)
    @property
    def splits(self): return self.tls[0].splits
    @property
    def split_idx(self): return self.tls[0].tfms.split_idx
    @property
    def items(self): return self.tls[0].items
    @items.setter
    def items(self, v):
        for tl in self.tls: tl.items = v
    def show(self, o, ctx=None, **kwargs):
        # Each tuple element is shown by its own `TfmdLists`, reusing the same ctx.
        for o_,tl in zip(o,self.tls): ctx = tl.show(o_, ctx=ctx, **kwargs)
        return ctx
    @contextmanager
    def set_split_idx(self, i):
        # Temporarily switch every pipeline's split_idx, restoring it on exit.
        old_split_idx = self.split_idx
        for tl in self.tls: tl.tfms.split_idx = i
        try: yield self
        finally:
            for tl in self.tls: tl.tfms.split_idx = old_split_idx
    _docs=dict(
        decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
        show="Show item `o` in `ctx`",
        dataloaders="Get a `DataLoaders`",
        overlapping_splits="All splits that are in more than one split",
        subset="New `Datasets` that only includes subset `i`",
        new_empty="Create a new empty version of the `self`, keeping only the transforms",
        set_split_idx="Contextmanager to use the same `Datasets` with another `split_idx`"
    )
# %% ../../nbs/03_data.core.ipynb 107
def test_set(
    dsets:Datasets|TfmdLists, # Map- or iterable-style dataset from which to load the data
    test_items, # Items in test dataset
    rm_tfms=None, # Start index of `Transform`(s) from validation set in `dsets` to apply
    with_labels:bool=False # Whether the test items contain labels
):
    "Create a test set from `test_items` using validation transforms of `dsets`"
    if isinstance(dsets, Datasets):
        # Drop the target pipelines unless the test items come with labels.
        tls = dsets.tls if with_labels else dsets.tls[:dsets.n_inp]
        # split_idx=1 means validation-time transforms (no train-only augmentation).
        test_tls = [tl._new(test_items, split_idx=1) for tl in tls]
        # Skip the leading transforms the raw test items have already been through.
        if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]
        else:               rm_tfms = tuplify(rm_tfms, match=test_tls)
        for i,j in enumerate(rm_tfms): test_tls[i].tfms.fs = test_tls[i].tfms.fs[j:]
        return Datasets(tls=test_tls)
    elif isinstance(dsets, TfmdLists):
        test_tl = dsets._new(test_items, split_idx=1)
        if rm_tfms is None: rm_tfms = dsets.infer_idx(get_first(test_items))
        test_tl.tfms.fs = test_tl.tfms.fs[rm_tfms:]
        return test_tl
    else: raise Exception(f"This method requires using the fastai library to assemble your data. Expected a `Datasets` or a `TfmdLists` but got {dsets.__class__.__name__}")
# %% ../../nbs/03_data.core.ipynb 112
@patch
@delegates(TfmdDL.__init__)
def test_dl(self:DataLoaders,
    test_items, # Items in test dataset
    rm_type_tfms=None, # Start index of `Transform`(s) from validation set in `dsets` to apply
    with_labels:bool=False, # Whether the test items contain labels
    **kwargs
):
    "Create a test dataloader from `test_items` using validation transforms of `dls`"
    # Non-fastai datasets are passed through untouched; otherwise build a test `Datasets`.
    test_ds = test_set(self.valid_ds, test_items, rm_tfms=rm_type_tfms, with_labels=with_labels
                      ) if isinstance(self.valid_ds, (Datasets, TfmdLists)) else test_items
    # Derive the loader from the validation one so batch tfms/types are shared.
    return self.valid.new(test_ds, **kwargs)
| 24,537 | 44.609665 | 172 | py |
fastai | fastai-master/fastai/data/all.py | from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
from .block import *
| 140 | 19.142857 | 28 | py |
fastai | fastai-master/fastai/data/block.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/06_data.block.ipynb.
# %% ../../nbs/06_data.block.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
# %% auto 0
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'RegressionBlock', 'DataBlock']
# %% ../../nbs/06_data.block.ipynb 6
class TransformBlock():
    "A basic wrapper that links defaults transforms for the data block API"
    def __init__(self,
        type_tfms:list=None, # One or more `Transform`s
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
        dl_type:TfmdDL=None, # Task specific `TfmdDL`, defaults to `TfmdDL`
        dls_kwargs:dict=None, # Additional arguments to be passed to `DataLoaders`
    ):
        # `ToTensor` always runs first among item transforms.
        self.type_tfms,self.item_tfms = L(type_tfms),ToTensor + L(item_tfms)
        self.batch_tfms = L(batch_tfms)
        self.dl_type = dl_type
        self.dls_kwargs = {} if dls_kwargs is None else dls_kwargs
# %% ../../nbs/06_data.block.ipynb 7
def CategoryBlock(
    vocab:MutableSequence|pd.Series=None, # List of unique class names
    sort:bool=True, # Sort the classes alphabetically
    add_na:bool=False, # Add `#na#` to `vocab`
):
    "`TransformBlock` for single-label categorical targets"
    tfm = Categorize(vocab=vocab, sort=sort, add_na=add_na)
    return TransformBlock(type_tfms=tfm)
# %% ../../nbs/06_data.block.ipynb 8
def MultiCategoryBlock(
    encoded:bool=False, # Whether the data comes in one-hot encoded
    vocab:MutableSequence|pd.Series=None, # List of unique class names
    add_na:bool=False, # Add `#na#` to `vocab`
):
    "`TransformBlock` for multi-label categorical targets"
    # One-hot-encoded targets only need decoding; raw labels are categorized then encoded.
    if encoded: tfm = EncodedMultiCategorize(vocab=vocab)
    else:       tfm = [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
    return TransformBlock(type_tfms=tfm)
# %% ../../nbs/06_data.block.ipynb 9
def RegressionBlock(
    n_out:int=None, # Number of output values
):
    "`TransformBlock` for float targets"
    setup = RegressionSetup(c=n_out)
    return TransformBlock(type_tfms=setup)
# %% ../../nbs/06_data.block.ipynb 11
from inspect import isfunction,ismethod
# %% ../../nbs/06_data.block.ipynb 12
def _merge_grouper(o):
if isinstance(o, LambdaType): return id(o)
elif isinstance(o, type): return o
elif (isfunction(o) or ismethod(o)): return o.__qualname__
return o.__class__
# %% ../../nbs/06_data.block.ipynb 13
def _merge_tfms(*tfms):
    "Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
    # Within each group the last transform wins (override semantics).
    grouped = groupby(concat(*tfms), _merge_grouper)
    kept = L(vs[-1] for vs in grouped.values())
    return kept.map(instantiate)
def _zip(x):
    "Transpose a collection of sequences via `L.zip`"
    return L(x).zip()
# %% ../../nbs/06_data.block.ipynb 16
@docs
@funcs_kwargs
class DataBlock():
    "Generic container to quickly build `Datasets` and `DataLoaders`."
    # These four may be supplied as kwargs at construction time (via @funcs_kwargs).
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
    _msg = "If you wanted to compose several transforms in your getter don't forget to wrap them in a `Pipeline`."
    def __init__(self,
        blocks:list=None, # One or more `TransformBlock`s
        dl_type:TfmdDL=None, # Task specific `TfmdDL`, defaults to `block`'s dl_type or`TfmdDL`
        getters:list=None, # Getter functions applied to results of `get_items`
        n_inp:int=None, # Number of inputs
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
        **kwargs,
    ):
        blocks = L(self.blocks if blocks is None else blocks)
        blocks = L(b() if callable(b) else b for b in blocks)
        self.type_tfms = blocks.attrgot('type_tfms', L())
        self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
        self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
        # The last block declaring a dl_type wins, unless overridden explicitly.
        for b in blocks:
            if getattr(b, 'dl_type', None) is not None: self.dl_type = b.dl_type
        if dl_type is not None: self.dl_type = dl_type
        self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
        self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))
        # By default everything but the last block is an input.
        self.n_inp = ifnone(n_inp, max(1, len(blocks)-1))
        # One getter per block; `get_x` replaces the input getters, `get_y` the target ones.
        self.getters = ifnone(getters, [noop]*len(self.type_tfms))
        if self.get_x:
            if len(L(self.get_x)) != self.n_inp:
                raise ValueError(f'get_x contains {len(L(self.get_x))} functions, but must contain {self.n_inp} (one for each input)\n{self._msg}')
            self.getters[:self.n_inp] = L(self.get_x)
        if self.get_y:
            n_targs = len(self.getters) - self.n_inp
            if len(L(self.get_y)) != n_targs:
                raise ValueError(f'get_y contains {len(L(self.get_y))} functions, but must contain {n_targs} (one for each target)\n{self._msg}')
            self.getters[self.n_inp:] = L(self.get_y)
        if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
        self.new(item_tfms, batch_tfms)
    def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(
        lambda g,tt: (g.fs if isinstance(g, Pipeline) else L(g)) + tt)
    def new(self,
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
    ):
        self.item_tfms  = _merge_tfms(self.default_item_tfms,  item_tfms)
        self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
        return self
    @classmethod
    def from_columns(cls,
        blocks:list =None, # One or more `TransformBlock`s
        getters:list =None, # Getter functions applied to results of `get_items`
        get_items:callable=None, # A function to get items
        **kwargs,
    ):
        # Items arrive column-wise; `_zip` transposes them into per-sample rows.
        if getters is None: getters = L(ItemGetter(i) for i in range(2 if blocks is None else len(L(blocks))))
        get_items = _zip if get_items is None else compose(get_items, _zip)
        return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)
    def datasets(self,
        source, # The data source
        verbose:bool=False, # Show verbose messages
    ) -> Datasets:
        self.source = source                     ; pv(f"Collecting items from {source}", verbose)
        items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
        splits = (self.splitter or RandomSplitter())(items)
        pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
        return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
    def dataloaders(self,
        source, # The data source
        path:str='.', # Data source and default `Learner` path
        verbose:bool=False, # Show verbose messages
        **kwargs
    ) -> DataLoaders:
        dsets = self.datasets(source, verbose=verbose)
        kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
        return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
    _docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
                 datasets="Create a `Datasets` object from `source`",
                 dataloaders="Create a `DataLoaders` object from `source`")
# %% ../../nbs/06_data.block.ipynb 31
def _short_repr(x):
if isinstance(x, tuple): return f'({", ".join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{", ".join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {"x".join([str(d) for d in x.shape])}'
# %% ../../nbs/06_data.block.ipynb 33
def _apply_pipeline(p, x):
    "Apply every transform in pipeline `p` to `x`, printing each step's result."
    print(f"  {p}\n    starting from\n    {_short_repr(x)}")
    for tfm in p.fs:
        nm = tfm.name
        try:
            x = tfm(x)
        except Exception:
            print(f"    applying {nm} failed.")
            raise
        if nm != "noop": print(f"    applying {nm} gives\n    {_short_repr(x)}")
    return x
# %% ../../nbs/06_data.block.ipynb 34
from .load import _collate_types
def _find_fail_collate(s):
    "Diagnose why samples `s` failed to collate; return a message, or `None` if no cause found."
    s = L(*s)
    # First check: every element of a sample must be a collatable type.
    for x in s[0]:
        if not isinstance(x, _collate_types): return f"{type(x).__name__} is not collatable"
    # Second check: collate each tuple position on its own to locate the offender.
    for i in range_of(s[0]):
        try: default_collate(s.itemgot(i))
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            shapes = [getattr(o[i], 'shape', None) for o in s]
            # Renamed the loop var (was `s`, shadowing the samples) — output unchanged.
            return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(sh) for sh in shapes])}"
# %% ../../nbs/06_data.block.ipynb 35
@patch
def summary(self:DataBlock,
    source, # The data source
    bs:int=4, # The batch size
    show_batch:bool=False, # Call `show_batch` after the summary
    **kwargs, # Additional keyword arguments to `show_batch`
):
    "Steps through the transform pipeline for one batch, and optionally calls `show_batch(**kwargs)` on the transient `Dataloaders`."
    # Stage 1: type transforms (item creation).
    print(f"Setting-up type transforms pipelines")
    dsets = self.datasets(source, verbose=True)
    print("\nBuilding one sample")
    for tl in dsets.train.tls:
        _apply_pipeline(tl.tfms, get_first(dsets.train.items))
    print(f"\nFinal sample: {dsets.train[0]}\n\n")

    # Stage 2: item transforms, applied sample by sample.
    dls = self.dataloaders(source, bs=bs, verbose=True)
    print("\nBuilding one batch")
    if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
        print("Applying item_tfms to the first sample:")
        s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
        print(f"\nAdding the next {bs-1} samples")
        s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
    else:
        print("No item_tfms to apply")
        s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]

    # Stage 3: before_batch transforms, applied to the whole list of samples.
    if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying before_batch to the list of samples")
        s = _apply_pipeline(dls.train.before_batch, s)
    else: print("\nNo before_batch transform to apply")

    # Stage 4: collation — on failure, explain which tuple member couldn't collate.
    print("\nCollating items in a batch")
    try:
        b = dls.train.create_batch(s)
        b = retain_types(b, s[0] if is_listy(s) else s)
    except Exception as e:
        print("Error! It's not possible to collate your items in a batch")
        why = _find_fail_collate(s)
        print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
        raise e

    # Stage 5: batch transforms (run on-device).
    if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying batch_tfms to the batch built")
        b = to_device(b, dls.device)
        b = _apply_pipeline(dls.train.after_batch, b)
    else: print("\nNo batch_tfms to apply")
    if show_batch: dls.show_batch(**kwargs)
| 11,106 | 44.150407 | 147 | py |
fastai | fastai-master/fastai/data/external.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/04_data.external.ipynb.
# %% ../../nbs/04_data.external.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from fastdownload import FastDownload
from functools import lru_cache
import fastai.data
# %% auto 0
__all__ = ['fastai_cfg', 'fastai_path', 'URLs', 'untar_data']
# %% ../../nbs/04_data.external.ipynb 24
@lru_cache(maxsize=None)
def fastai_cfg() -> Config: # Config that contains default download paths for `data`, `model`, `storage` and `archive`
    "`Config` object for fastai's `config.ini`"
    home = Path(os.getenv('FASTAI_HOME', '~/.fastai'))
    defaults = dict(data = 'data', archive = 'archive', storage = 'tmp', model = 'models')
    return Config(home, 'config.ini', create=defaults)
# %% ../../nbs/04_data.external.ipynb 27
def fastai_path(folder:str) -> Path:
    "Local path to `folder` in `Config`"
    cfg = fastai_cfg()
    return cfg.path(folder)
# %% ../../nbs/04_data.external.ipynb 30
class URLs():
    "Global constants for dataset and model URLs."
    LOCAL_PATH = Path.cwd()
    MDL = 'http://files.fast.ai/models/'
    GOOGLE = 'https://storage.googleapis.com/'
    S3  = 'https://s3.amazonaws.com/fast-ai-'
    URL = f'{S3}sample/'

    # Per-category S3 bucket prefixes.
    S3_IMAGE    = f'{S3}imageclas/'
    S3_IMAGELOC = f'{S3}imagelocal/'
    S3_AUDI     = f'{S3}audio/'
    S3_NLP      = f'{S3}nlp/'
    S3_COCO     = f'{S3}coco/'
    S3_MODEL    = f'{S3}modelzoo/'

    # main datasets
    ADULT_SAMPLE        = f'{URL}adult_sample.tgz'
    BIWI_SAMPLE         = f'{URL}biwi_sample.tgz'
    CIFAR               = f'{URL}cifar10.tgz'
    COCO_SAMPLE         = f'{S3_COCO}coco_sample.tgz'
    COCO_TINY           = f'{S3_COCO}coco_tiny.tgz'
    HUMAN_NUMBERS       = f'{URL}human_numbers.tgz'
    IMDB                = f'{S3_NLP}imdb.tgz'
    IMDB_SAMPLE         = f'{URL}imdb_sample.tgz'
    ML_SAMPLE           = f'{URL}movie_lens_sample.tgz'
    ML_100k             = 'https://files.grouplens.org/datasets/movielens/ml-100k.zip'
    MNIST_SAMPLE        = f'{URL}mnist_sample.tgz'
    MNIST_TINY          = f'{URL}mnist_tiny.tgz'
    MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
    PLANET_SAMPLE       = f'{URL}planet_sample.tgz'
    PLANET_TINY         = f'{URL}planet_tiny.tgz'
    IMAGENETTE          = f'{S3_IMAGE}imagenette2.tgz'
    IMAGENETTE_160      = f'{S3_IMAGE}imagenette2-160.tgz'
    IMAGENETTE_320      = f'{S3_IMAGE}imagenette2-320.tgz'
    IMAGEWOOF           = f'{S3_IMAGE}imagewoof2.tgz'
    IMAGEWOOF_160       = f'{S3_IMAGE}imagewoof2-160.tgz'
    IMAGEWOOF_320       = f'{S3_IMAGE}imagewoof2-320.tgz'
    IMAGEWANG           = f'{S3_IMAGE}imagewang.tgz'
    IMAGEWANG_160       = f'{S3_IMAGE}imagewang-160.tgz'
    IMAGEWANG_320       = f'{S3_IMAGE}imagewang-320.tgz'

    # kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
    DOGS = f'{URL}dogscats.tgz'

    # image classification datasets
    CALTECH_101  = f'{S3_IMAGE}caltech_101.tgz'
    CARS         = f'{S3_IMAGE}stanford-cars.tgz'
    CIFAR_100    = f'{S3_IMAGE}cifar100.tgz'
    CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
    FLOWERS      = f'{S3_IMAGE}oxford-102-flowers.tgz'
    FOOD         = f'{S3_IMAGE}food-101.tgz'
    MNIST        = f'{S3_IMAGE}mnist_png.tgz'
    PETS         = f'{S3_IMAGE}oxford-iiit-pet.tgz'

    # NLP datasets
    AG_NEWS                 = f'{S3_NLP}ag_news_csv.tgz'
    AMAZON_REVIEWS          = f'{S3_NLP}amazon_review_full_csv.tgz'
    AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
    DBPEDIA                 = f'{S3_NLP}dbpedia_csv.tgz'
    MT_ENG_FRA              = f'{S3_NLP}giga-fren.tgz'
    SOGOU_NEWS              = f'{S3_NLP}sogou_news_csv.tgz'
    WIKITEXT                = f'{S3_NLP}wikitext-103.tgz'
    WIKITEXT_TINY           = f'{S3_NLP}wikitext-2.tgz'
    YAHOO_ANSWERS           = f'{S3_NLP}yahoo_answers_csv.tgz'
    YELP_REVIEWS            = f'{S3_NLP}yelp_review_full_csv.tgz'
    YELP_REVIEWS_POLARITY   = f'{S3_NLP}yelp_review_polarity_csv.tgz'

    # Image localization datasets
    BIWI_HEAD_POSE     = f"{S3_IMAGELOC}biwi_head_pose.tgz"
    CAMVID             = f'{S3_IMAGELOC}camvid.tgz'
    CAMVID_TINY        = f'{URL}camvid_tiny.tgz'
    LSUN_BEDROOMS      = f'{S3_IMAGE}bedroom.tgz'
    PASCAL_2007        = f'{S3_IMAGELOC}pascal_2007.tgz'
    PASCAL_2012        = f'{S3_IMAGELOC}pascal_2012.tgz'

    # Audio classification datasets
    MACAQUES    = f'{GOOGLE}ml-animal-sounds-datasets/macaques.zip'
    ZEBRA_FINCH = f'{GOOGLE}ml-animal-sounds-datasets/zebra_finch.zip'

    # Medical Imaging datasets
    #SKIN_LESION        = f'{S3_IMAGELOC}skin_lesion.tgz'
    SIIM_SMALL         = f'{S3_IMAGELOC}siim_small.tgz'
    TCGA_SMALL         = f'{S3_IMAGELOC}tcga_small.tgz'

    #Pretrained models
    OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
    WT103_FWD          = f'{S3_MODEL}wt103-fwd.tgz'
    WT103_BWD          = f'{S3_MODEL}wt103-bwd.tgz'

    # NB: defined without `self` and without `@staticmethod`; call it on the class,
    # e.g. `URLs.path(URLs.PETS)`.
    def path(
        url:str='.', # File to download
        c_key:str='archive' # Key in `Config` where to save URL
    ) -> Path:
        "Local path where to download based on `c_key`"
        fname = url.split('/')[-1]
        # A copy already sitting next to the working directory wins over the config path.
        local_path = URLs.LOCAL_PATH/('models' if c_key=='model' else 'data')/fname
        if local_path.exists(): return local_path
        return fastai_path(c_key)/fname
# %% ../../nbs/04_data.external.ipynb 35
def untar_data(
    url:str, # File to download
    archive:Path=None, # Optional override for `Config`'s `archive` key
    data:Path=None, # Optional override for `Config`'s `data` key
    c_key:str='data', # Key in `Config` where to extract file
    force_download:bool=False, # Setting to `True` will overwrite any existing copy of data
    base:str='~/.fastai' # Directory containing config file and base of relative paths
) -> Path: # Path to extracted file(s)
    "Download `url` using `FastDownload.get`"
    downloader = FastDownload(fastai_cfg(), module=fastai.data,
                              archive=archive, data=data, base=base)
    return downloader.get(url, force=force_download, extract_key=c_key)
| 6,020 | 42.948905 | 118 | py |
fastai | fastai-master/fastai/data/load.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/02_data.load.ipynb.
# %% ../../nbs/02_data.load.ipynb 3
from __future__ import annotations
from ..torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
# %% auto 0
__all__ = ['fa_collate', 'fa_convert', 'SkipItemException', 'collate_error', 'DataLoader']
# %% ../../nbs/02_data.load.ipynb 8
def _wif(worker_id):
    "Worker-init function run in each `DataLoader` worker process."
    set_num_threads(1)  # limit intra-op threads inside each worker
    info = get_worker_info()
    # `info.dataset` is the `_FakeLoader`; `.d` is the real fastai `DataLoader`.
    ds = info.dataset.d
    # Record sharding info so `DataLoader.sample` can pick this worker's slice of indices.
    ds.num_workers,ds.offs = info.num_workers,info.id
    set_seed(info.seed)
    ds.wif()  # user-overridable hook
class _FakeLoader:
    "Minimal object satisfying the interface torch's `_BaseDataLoaderIter` classes probe, delegating the real work to a fastai `DataLoader` (`self.d`)."
    def _fn_noops(self, x=None, *args, **kwargs): return x
    # Collation is a no-op here because the fastai DataLoader builds its own batches.
    _IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
    _index_sampler,generator,prefetch_factor,_get_shared_seed = Inf.count,None,2,noop
    dataset_kind = _dataset_kind = _DatasetKind.Iterable
    def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers,pin_memory_device):
        # `self.dataset = self` so torch hands this object to `_wif` in each worker.
        self.dataset,self.default,self.worker_init_fn,self.pin_memory_device = self,d,_wif,pin_memory_device
        store_attr('d,pin_memory,num_workers,timeout,persistent_workers,pin_memory_device')
    def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
    @property
    def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
    @contextmanager
    def no_multiproc(self):
        "Temporarily force single-process loading (used e.g. by `DataLoader.one_batch`)."
        old_num_workers = self.num_workers
        try:
            self.num_workers = 0
            yield self.d
        finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
# %% ../../nbs/02_data.load.ipynb 9
def fa_collate(t):
    "A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
    first = t[0]
    if isinstance(first, _collate_types): return default_collate(t)
    if isinstance(first, Sequence):
        # Recurse column-wise, rebuilding with the first sample's own container type.
        return type(first)([fa_collate(col) for col in zip(*t)])
    return default_collate(t)
# %% ../../nbs/02_data.load.ipynb 11
def fa_convert(t):
    "A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
    if isinstance(t, _collate_types): return default_convert(t)
    if isinstance(t, Sequence):
        # Recurse element-wise, preserving the container's type.
        return type(t)([fa_convert(o) for o in t])
    return default_convert(t)
# %% ../../nbs/02_data.load.ipynb 13
class SkipItemException(Exception):
    "Raised to notify `DataLoader` to skip an item"
    # Caught in `DataLoader.do_item`, which then drops the sample from the batch.
    pass
# %% ../../nbs/02_data.load.ipynb 15
def collate_error(e:Exception, batch):
    "Raises error when the batch could not collate, stating what items in the batch are different sizes and their types"
    err = f'Error when trying to collate the data into batches with fa_collate, at least two tensors in the batch are not the same size.\n\n'
    # we need to iterate through the entire batch and find a mismatch
    length = len(batch[0])
    for idx in range(length): # for each type in the batch
        for i, item in enumerate(batch):
            # The first item's shape is the reference for this tuple position.
            if i == 0: shape_a, type_a = item[idx].shape, item[idx].__class__.__name__
            elif item[idx].shape != shape_a:
                # The `elif` above already guarantees a mismatch here, so the old
                # redundant `if shape_a != shape_b` re-check was removed.
                shape_b = item[idx].shape
                err += f'Mismatch found on axis {idx} of the batch and is of type `{type_a}`:\n\tItem at index 0 has shape: {shape_a}\n\tItem at index {i} has shape: {shape_b}\n\nPlease include a transform in `after_item` that ensures all data of type {type_a} is the same size'
    e.args = [err]
    # Bare `raise` re-raises the exception currently being handled, so this function
    # must be called from inside an `except` block (see `DataLoader.create_batch`).
    raise
# %% ../../nbs/02_data.load.ipynb 18
@funcs_kwargs
class DataLoader(GetAttr):
    "API compatible with PyTorch DataLoader, with extra callbacks and flexibility."
    # Callback hooks that default to identity; overridable via constructor kwargs
    # (see `funcs_kwargs`) or subclassing.
    _noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
    for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
    _methods = _noop_methods + 'create_batches create_item create_batch retain \
        get_idxs sample shuffle_fn do_batch create_batch'.split()
    # Unknown attribute lookups are delegated to `self.dataset` (via `GetAttr`).
    _default = 'dataset'
    def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
                 shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False,
                 pin_memory_device='', **kwargs):
        if batch_size is not None: bs = batch_size # PyTorch compatibility
        assert not (bs is None and drop_last)
        # An "indexed" dataset supports random access; an iterable one must be read in order.
        if indexed is None: indexed = (hasattr(dataset,'__getitem__')
                                       and not isinstance(dataset, IterableDataset))
        if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
        if n is None:
            try: n = len(dataset)
            except TypeError: pass
        store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
        self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
        # Multiprocess loading is disabled on platforms where it's problematic.
        if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0: num_workers = 0
        if sys.platform == "darwin" and num_workers > 0: num_workers = 0
        # `_FakeLoader` is the shim handed to torch's iterator machinery.
        self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers,
                                  pin_memory_device=pin_memory_device)
    def __len__(self):
        if self.n is None: raise TypeError
        if self.bs is None: return self.n
        # Number of batches, rounding up unless drop_last.
        return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
    def get_idxs(self):
        idxs = Inf.count if self.indexed else Inf.nones
        if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
        if self.shuffle: idxs = self.shuffle_fn(idxs)
        return idxs
    def sample(self):
        # Shard whole batches across workers using `num_workers`/`offs` set by `_wif`.
        return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
    def __iter__(self):
        self.randomize()
        self.before_iter()
        self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
        for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
            # pin_memory causes tuples to be converted to lists, so convert them back to tuples
            if self.pin_memory and type(b) == list: b = tuple(b)
            if self.device is not None: b = to_device(b, self.device)
            yield self.after_batch(b)
        self.after_iter()
        if hasattr(self, 'it'): del(self.it)
    def create_batches(self, samps):
        if self.dataset is not None: self.it = iter(self.dataset)
        # Items returning None (skipped via SkipItemException) are filtered out.
        res = filter(lambda o:o is not None, map(self.do_item, samps))
        yield from map(self.do_batch, self.chunkify(res))
    def new(self, dataset=None, cls=None, **kwargs):
        if dataset is None: dataset = self.dataset
        if cls is None: cls = type(self)
        cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
                          bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
        # Carry over any hook that was overridden with a plain function at construction.
        for n in self._methods:
            o = getattr(self, n)
            if not isinstance(o, MethodType): cur_kwargs[n] = o
        return cls(**merge(cur_kwargs, kwargs))
    @property
    def prebatched(self): return self.bs is None
    def do_item(self, s):
        try: return self.after_item(self.create_item(s))
        except SkipItemException: return None
    def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
    def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
    def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
    def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
    def create_item(self, s):
        if self.indexed: return self.dataset[s or 0]
        elif s is None:  return next(self.it)
        else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
    def create_batch(self, b):
        try: return (fa_collate,fa_convert)[self.prebatched](b)
        except Exception as e:
            if not self.prebatched: collate_error(e,b)
            raise
    def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
    def to(self, device): self.device = device
    def one_batch(self):
        if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
        with self.fake_l.no_multiproc(): res = first(self)
        if hasattr(self, 'it'): delattr(self, 'it')
        return res
# %% ../../nbs/02_data.load.ipynb 19
# Attach docstrings to `DataLoader` and its hook methods (including the exec-generated
# no-ops above) via fastcore's `add_docs`.
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
         get_idxs       = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
         sample         = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
         create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
         new            = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
         prebatched     = "Check if `bs` is None.",
         do_item        = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
         chunkify       = "Used by `create_batches` to turn generator of items (`b`) into batches.",
         shuffle_fn     = "Returns a random permutation of `idxs`.",
         randomize      = "Set's `DataLoader` random number generator state.",
         retain         = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
         create_item    = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
         create_batch   = "Collate a list of items into a batch.",
         do_batch       = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
         to             = "Sets `self.device=device`.",
         one_batch      = "Return one batch from `DataLoader`.",
         wif            = "See pytorch `worker_init_fn` for details.",
         before_iter    = "Called before `DataLoader` starts to read/iterate over the dataset.",
         after_item     = "Takes output of `create_item` as input and applies this function on it.",
         before_batch   = "It is called before collating a list of items into a batch. Input is a list of items.",
         after_batch    = "After collating mini-batch of items, the mini-batch is passed through this function.",
         after_iter     = "Called after `DataLoader` has fully read/iterated over the dataset.")
| 10,928 | 53.919598 | 282 | py |
fastai | fastai-master/fastai/data/transforms.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/05_data.transforms.ipynb.
# %% ../../nbs/05_data.transforms.ipynb 2
from __future__ import annotations
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from sklearn.model_selection import train_test_split
import posixpath
# %% auto 0
__all__ = ['image_extensions', 'get_files', 'FileGetter', 'get_image_files', 'ImageGetter', 'get_text_files', 'ItemGetter',
'AttrGetter', 'RandomSplitter', 'TrainTestSplitter', 'IndexSplitter', 'EndSplitter', 'GrandparentSplitter',
'FuncSplitter', 'MaskSplitter', 'FileSplitter', 'ColSplitter', 'RandomSubsetSplitter', 'parent_label',
'RegexLabeller', 'ColReader', 'CategoryMap', 'Categorize', 'Category', 'MultiCategorize', 'MultiCategory',
'OneHotEncode', 'EncodedMultiCategorize', 'RegressionSetup', 'get_c', 'ToTensor', 'IntToFloatTensor',
'broadcast_vec', 'Normalize']
# %% ../../nbs/05_data.transforms.ipynb 10
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# %% ../../nbs/05_data.transforms.ipynb 11
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
    "Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
    path = Path(path)
    folders=L(folders)
    extensions = setify(extensions)
    extensions = {e.lower() for e in extensions}
    if recurse:
        res = []
        for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
            # Mutating `d` in place prunes os.walk's descent: at the top level keep only
            # the requested `folders`; below that, just skip hidden directories.
            if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
            else: d[:] = [o for o in d if not o.startswith('.')]
            # When specific folders were requested, files directly in `path` are excluded
            # unless '.' was listed among them.
            if len(folders) !=0 and i==0 and '.' not in folders: continue
            res += _get_files(p, f, extensions)
    else:
        f = [o.name for o in os.scandir(path) if o.is_file()]
        res = _get_files(path, f, extensions)
    return L(res)
# %% ../../nbs/05_data.transforms.ipynb 16
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
    "Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
    def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
        return get_files(o/suf, extensions=extensions, recurse=recurse, folders=folders)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 18
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
# %% ../../nbs/05_data.transforms.ipynb 19
def get_image_files(path, recurse=True, folders=None):
    "Get image files in `path` recursively, only in `folders`, if specified."
    # Delegate to `get_files`, restricted to known image extensions.
    opts = dict(extensions=image_extensions, recurse=recurse, folders=folders)
    return get_files(path, **opts)
# %% ../../nbs/05_data.transforms.ipynb 22
def ImageGetter(suf='', recurse=True, folders=None):
    "Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified"
    def _inner(o, recurse=recurse, folders=folders):
        return get_image_files(o/suf, recurse, folders)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 25
def get_text_files(path, recurse=True, folders=None):
    "Get text files in `path` recursively, only in `folders`, if specified."
    txt_only = ['.txt']
    return get_files(path, extensions=txt_only, recurse=recurse, folders=folders)
# %% ../../nbs/05_data.transforms.ipynb 26
class ItemGetter(ItemTransform):
    "Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
    # presumably disables fastcore's type-retaining behavior for the output — see
    # `ItemTransform._retain`; confirm against fastcore docs
    _retain = False
    def __init__(self, i): self.i = i
    def encodes(self, x): return x[self.i]
# %% ../../nbs/05_data.transforms.ipynb 28
class AttrGetter(ItemTransform):
    "Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
    # presumably disables fastcore's type-retaining behavior for the output
    _retain = False
    def __init__(self, nm, default=None): store_attr()  # stores `nm` and `default` on self
    def encodes(self, x): return getattr(x, self.nm, self.default)
# %% ../../nbs/05_data.transforms.ipynb 32
def RandomSplitter(valid_pct=0.2, seed=None):
    "Create function that splits `items` between train/val with `valid_pct` randomly."
    def _inner(o):
        if seed is not None: torch.manual_seed(seed)
        shuffled = L(list(torch.randperm(len(o)).numpy()))
        n_valid = int(valid_pct * len(o))
        # Validation indices come first in the permutation; the remainder trains.
        return shuffled[n_valid:], shuffled[:n_valid]
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 36
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
    "Split `items` into random train and test subsets using sklearn train_test_split utility."
    def _inner(o, **kwargs):
        idxs = range_of(o)
        train,valid = train_test_split(idxs, test_size=test_size, random_state=random_state,
                                       stratify=stratify, train_size=train_size, shuffle=shuffle)
        return L(train), L(valid)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 38
def IndexSplitter(valid_idx):
    "Split `items` so that `val_idx` are in the validation set and the others in the training set"
    def _inner(o):
        # Training indices are everything not listed in `valid_idx`.
        valid = np.array(valid_idx)
        train_idx = np.setdiff1d(np.array(range_of(o)), valid)
        return L(train_idx, use_list=True), L(valid_idx, use_list=True)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 40
def EndSplitter(valid_pct=0.2, valid_last=True):
    "Create function that splits `items` between train/val with `valid_pct` at the end if `valid_last` else at the start. Useful for ordered data."
    assert 0<valid_pct<1, "valid_pct must be in (0,1)"
    def _inner(o):
        idxs = range_of(o)
        cut = int(valid_pct * len(o))
        # Fix: when `cut == 0` (very small datasets), the old `idxs[:-cut]` evaluated
        # to `idxs[:0]`, returning an EMPTY training set and everything as validation.
        # Using an explicit split point keeps the semantics right for cut == 0.
        if valid_last: return idxs[:len(o)-cut], idxs[len(o)-cut:]
        return idxs[cut:], idxs[:cut]
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 42
def _grandparent_idxs(items, name):
    "Indices of `items` whose grandparent folder is (one of) `name`."
    def _match(nm): return mask2idxs(Path(o).parent.parent.name == nm for o in items)
    return [i for nm in L(name) for i in _match(nm)]
# %% ../../nbs/05_data.transforms.ipynb 43
def GrandparentSplitter(train_name='train', valid_name='valid'):
    "Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
    def _inner(o):
        train = _grandparent_idxs(o, train_name)
        valid = _grandparent_idxs(o, valid_name)
        return train, valid
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 47
def FuncSplitter(func):
    "Split `items` by result of `func` (`True` for validation, `False` for training set)."
    def _inner(o):
        mask = [func(item) for item in o]
        return IndexSplitter(mask2idxs(mask))(o)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 49
def MaskSplitter(mask):
    "Split `items` depending on the value of `mask`."
    def _inner(o):
        valid_idx = mask2idxs(mask)
        return IndexSplitter(valid_idx)(o)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 51
def FileSplitter(fname):
    "Split `items` by providing file `fname` (contains names of valid items separated by newline)."
    # The file is read once, at splitter-creation time.
    valid_names = set(Path(fname).read_text().split('\n'))
    def _is_valid(x): return x.name in valid_names
    def _inner(o): return FuncSplitter(_is_valid)(o)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 53
def ColSplitter(col='is_valid', on=None):
    "Split `items` (supposed to be a dataframe) by value in `col`"
    def _inner(o):
        assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
        vals = o.iloc[:,col] if isinstance(col, int) else o[col]
        # Column is boolean by default; otherwise compare against `on` (scalar or list).
        if on is None:     valid_idx = vals.values.astype('bool')
        elif is_listy(on): valid_idx = vals.isin(on)
        else:              valid_idx = vals == on
        return IndexSplitter(mask2idxs(valid_idx))(o)
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 55
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
    "Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
    assert 0 < train_sz < 1
    assert 0 < valid_sz < 1
    assert train_sz + valid_sz <= 1.
    def _inner(o):
        if seed is not None: torch.manual_seed(seed)
        n_train = int(len(o)*train_sz)
        n_valid = int(len(o)*valid_sz)
        perm = L(list(torch.randperm(len(o)).numpy()))
        # Adjacent, non-overlapping slices of one permutation.
        return perm[:n_train], perm[n_train:n_train+n_valid]
    return _inner
# %% ../../nbs/05_data.transforms.ipynb 59
def parent_label(o):
    "Label `item` with the parent folder name."
    parent = Path(o).parent
    return parent.name
# %% ../../nbs/05_data.transforms.ipynb 63
class RegexLabeller():
    "Label `item` with regex `pat`."
    def __init__(self, pat, match=False):
        self.pat = re.compile(pat)
        # `match=True` anchors at the start of the string; the default searches anywhere.
        self.matcher = self.pat.match if match else self.pat.search
    def __call__(self, o):
        # Normalise Windows separators so patterns can always use '/'.
        txt = str(o).replace(os.sep, posixpath.sep)
        res = self.matcher(txt)
        assert res,f'Failed to find "{self.pat}" in "{txt}"'
        return res.group(1)
# %% ../../nbs/05_data.transforms.ipynb 68
class ColReader(DisplayedTransform):
    "Read `cols` in `row` with potential `pref` and `suff`"
    def __init__(self, cols, pref='', suff='', label_delim=None):
        store_attr()
        # A Path prefix gets a trailing separator so it can be joined by f-string below.
        self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
        self.cols = L(cols)
    def _do_one(self, r, c):
        "Read one column `c` from row `r` (int positional, or named-tuple attribute)."
        o = r[c] if isinstance(c, int) or not c in getattr(r, '_fields', []) else getattr(r, c)
        if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
        if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
        # With a delimiter, the cell is split into a (possibly empty) list of labels.
        else: return o.split(self.label_delim) if len(o)>0 else []
    def __call__(self, o, **kwargs):
        if len(self.cols) == 1: return self._do_one(o, self.cols[0])
        return L(self._do_one(o, c) for c in self.cols)
# %% ../../nbs/05_data.transforms.ipynb 72
class CategoryMap(CollBase):
    "Collection of categories with the reverse mapping in `o2i`"
    def __init__(self, col, sort=True, add_na=False, strict=False):
        # Pandas categoricals already carry an ordered category list; reuse it.
        if is_categorical_dtype(col):
            items = L(col.cat.categories, use_list=True)
            #Remove non-used categories while keeping order
            if strict: items = L(o for o in items if o in col.unique())
        else:
            if not hasattr(col,'unique'): col = L(col, use_list=True)
            # `o==o` is the generalized definition of non-NaN used by Pandas
            items = L(o for o in col.unique() if o==o)
            if sort: items = items.sorted()
        # With add_na, '#na#' is prepended at index 0 and unknown lookups default to
        # 0 via the defaultdict; otherwise unknown keys raise KeyError.
        self.items = '#na#' + items if add_na else items
        self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
    def map_objs(self,objs):
        "Map `objs` to IDs"
        return L(self.o2i[o] for o in objs)
    def map_ids(self,ids):
        "Map `ids` to objects in vocab"
        return L(self.items[o] for o in ids)
    def __eq__(self,b): return all_equal(b,self)
# %% ../../nbs/05_data.transforms.ipynb 78
class Categorize(DisplayedTransform):
    "Reversible transform of category string to `vocab` id"
    loss_func,order=CrossEntropyLossFlat(),1
    def __init__(self, vocab=None, sort=True, add_na=False):
        # Build the CategoryMap eagerly when a vocab is given; otherwise `setups` infers it.
        if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
        store_attr()
    def setups(self, dsets):
        if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
        self.c = len(self.vocab)  # number of classes
    def encodes(self, o):
        try:
            return TensorCategory(self.vocab.o2i[o])
        except KeyError as e:
            # Re-raise with a friendlier message, preserving the original cause.
            raise KeyError(f"Label '{o}' was not included in the training dataset") from e
    def decodes(self, o): return Category (self.vocab [o])
# %% ../../nbs/05_data.transforms.ipynb 79
class Category(str, ShowTitle): _show_args = {'label': 'category'}
# %% ../../nbs/05_data.transforms.ipynb 84
class MultiCategorize(Categorize):
    "Reversible transform of multi-category strings to `vocab` id"
    loss_func,order=BCEWithLogitsLossFlat(),1
    # Sort the inferred vocab only when none was supplied explicitly.
    def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)
    def setups(self, dsets):
        if not dsets: return
        if self.vocab is None:
            # Union of all labels across every sample's label list.
            vals = set()
            for b in dsets: vals = vals.union(set(b))
            self.vocab = CategoryMap(list(vals), add_na=self.add_na)
    def encodes(self, o):
        # Report every unknown label at once, not just the first.
        if not all(elem in self.vocab.o2i.keys() for elem in o):
            diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
            diff_str = "', '".join(diff)
            raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
        return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
    def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
# %% ../../nbs/05_data.transforms.ipynb 85
class MultiCategory(L):
    "A list of decoded labels that shows itself joined by `sep`."
    def show(self, ctx=None, sep=';', color='black', **kwargs):
        return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
# %% ../../nbs/05_data.transforms.ipynb 87
class OneHotEncode(DisplayedTransform):
    "One-hot encodes targets"
    order=2  # runs after Categorize/MultiCategorize (order 1)
    def __init__(self, c=None): store_attr()
    def setups(self, dsets):
        # Infer the number of classes from the dataset's vocab when not given.
        if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
        if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
    def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
    def decodes(self, o): return one_hot_decode(o, None)
# %% ../../nbs/05_data.transforms.ipynb 92
class EncodedMultiCategorize(Categorize):
    "Transform of one-hot encoded multi-category that decodes with `vocab`"
    loss_func,order=BCEWithLogitsLossFlat(),1
    def __init__(self, vocab):
        # `vocab is None` (identity comparison) instead of `vocab == None`;
        # sorting only matters when the parent has to build the vocab itself.
        super().__init__(vocab, sort=vocab is None)
        self.c = len(vocab)
    def encodes(self, o): return TensorMultiCategory(tensor(o).float())
    def decodes(self, o): return MultiCategory(one_hot_decode(o, self.vocab))
# %% ../../nbs/05_data.transforms.ipynb 94
class RegressionSetup(DisplayedTransform):
    "Transform that floatifies targets"
    loss_func=MSELossFlat()
    def __init__(self, c=None): store_attr()  # `c`: number of regression targets (inferred in `setups` if None)
    def encodes(self, o): return tensor(o).float()
    def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
    def setups(self, dsets):
        "Infer the number of targets from the first item of `dsets`"
        if self.c is not None: return
        # `except Exception` (not bare `except`) so KeyboardInterrupt/SystemExit propagate
        try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
        except Exception: self.c = 0
# %% ../../nbs/05_data.transforms.ipynb 96
def get_c(dls):
    "Number of output classes/targets for `dls`, searched in several places"
    # A `c` attribute set directly on the DataLoaders wins, then one set by
    # the item or batch transforms.
    c = getattr(dls, 'c', False)
    if c: return c
    for attr_path in ('train.after_item.c', 'train.after_batch.c'):
        c = nested_attr(dls, attr_path, False)
        if c: return c
    # Fall back to the vocab length; with several vocabs stored, presumably the
    # last one holds the targets — TODO confirm.
    vocab = getattr(dls, 'vocab', [])
    if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
    return len(vocab)
# %% ../../nbs/05_data.transforms.ipynb 109
class ToTensor(Transform):
    "Convert item to appropriate tensor class"
    order = 5  # transform ordering priority within a pipeline
# %% ../../nbs/05_data.transforms.ipynb 111
class IntToFloatTensor(DisplayedTransform):
    "Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
    order = 10 #Need to run after PIL transforms on the GPU
    def __init__(self, div=255., div_mask=1): store_attr()
    def encodes(self, o:TensorImage): return o.float().div_(self.div)
    # Masks stay integer-typed: the float division is truncated back to long
    def encodes(self, o:TensorMask ): return (o.long() / self.div_mask).long()
    def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
# %% ../../nbs/05_data.transforms.ipynb 114
def broadcast_vec(dim, ndim, *t, cuda=True):
    "Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
    # Shape of all-ones except -1 at `dim`, e.g. dim=1, ndim=4 -> (1,-1,1,1)
    shape = [1] * ndim
    shape[dim] = -1
    move = to_device if cuda else noop
    return [move(tensor(vec).view(*shape)) for vec in t]
# %% ../../nbs/05_data.transforms.ipynb 115
@docs
class Normalize(DisplayedTransform):
    "Normalize/denorm batch of `TensorImage`"
    parameters,order = L('mean', 'std'),99
    def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
    @classmethod
    def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
    def setups(self, dl:DataLoader):
        # Infer statistics from one batch when none were given; the +1e-7 guards
        # against division by zero for constant channels.
        if self.mean is None or self.std is None:
            x,*_ = dl.one_batch()
            self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
    def encodes(self, x:TensorImage): return (x-self.mean) / self.std
    def decodes(self, x:TensorImage):
        # Bring the stats back to CPU when the batch lives there, so devices match
        f = to_cpu if x.device.type=='cpu' else noop
        return (x*f(self.std) + f(self.mean))
    _docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
| 16,883 | 43.083551 | 147 | py |
fastai | fastai-master/fastai/text/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/30_text.core.ipynb.
# %% ../../nbs/30_text.core.ipynb 1
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
# %% auto 0
__all__ = ['UNK', 'PAD', 'BOS', 'EOS', 'FLD', 'TK_REP', 'TK_WREP', 'TK_UP', 'TK_MAJ', 'WordTokenizer', 'fn_counter_pkl',
'fn_lengths_pkl', 'eu_langs', 'SubwordTokenizer', 'spec_add_spaces', 'rm_useless_spaces', 'replace_rep',
'replace_wrep', 'fix_html', 'replace_all_caps', 'replace_maj', 'lowercase', 'replace_space', 'BaseTokenizer',
'SpacyTokenizer', 'TokenizeWithRules', 'tokenize1', 'parallel_tokenize', 'tokenize_folder', 'tokenize_files',
'tokenize_texts', 'tokenize_df', 'tokenize_csv', 'load_tokenized_csv', 'Tokenizer', 'SentencePieceTokenizer']
# %% ../../nbs/30_text.core.ipynb 5
import html
# %% ../../nbs/30_text.core.ipynb 8
#special tokens: unknown, padding, begin/end-of-stream, field marker,
#char/word repetition markers, and all-caps / capitalized-word markers
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
# %% ../../nbs/30_text.core.ipynb 9
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
# %% ../../nbs/30_text.core.ipynb 10
# Captures any of the three characters that must be isolated by spaces
_re_spec = re.compile(r'([/#\\])')

def spec_add_spaces(t):
    "Surround each '/', '#' or backslash in `t` with single spaces"
    return _re_spec.sub(lambda m: f' {m.group(1)} ', t)
# %% ../../nbs/30_text.core.ipynb 12
# A run of two or more consecutive spaces
_re_space = re.compile(' {2,}')

def rm_useless_spaces(t):
    "Collapse every run of two or more spaces in `t` to a single space"
    return _re_space.sub(lambda _: ' ', t)
# %% ../../nbs/30_text.core.ipynb 14
# A non-space char followed by 2+ repetitions of itself (3+ total)
_re_rep = re.compile(r'(\S)(\1{2,})')

def replace_rep(t):
    "Replace repetitions at the character level: cccc -- TK_REP 4 c"
    def _sub(m):
        ch, rest = m.groups()
        return f' {TK_REP} {len(rest)+1} {ch} '
    return _re_rep.sub(_sub, t)
# %% ../../nbs/30_text.core.ipynb 17
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
# %% ../../nbs/30_text.core.ipynb 19
def replace_wrep(t):
    "Replace word repetitions: word word word word -- TK_WREP 4 word"
    def _sub(m):
        word, reps, trail = m.groups()
        # `reps` holds the middle repetitions; +2 counts the first and last word
        return f' {TK_WREP} {len(reps.split())+2} {word} {trail}'
    return _re_wrep.sub(_sub, t)
# %% ../../nbs/30_text.core.ipynb 22
def fix_html(x):
    "Various messy things we've seen in documents"
    # Applied in order, then any remaining HTML entities are unescaped
    subs = [('#39;', "'"), ('amp;', '&'), ('#146;', "'"), ('nbsp;', ' '),
            ('#36;', '$'), ('\\n', "\n"), ('quot;', "'"), ('<br />', "\n"),
            ('\\"', '"'), ('<unk>', UNK), (' @.@ ', '.'), (' @-@ ', '-'), ('...', ' …')]
    for old, new in subs:
        x = x.replace(old, new)
    return html.unescape(x)
# %% ../../nbs/30_text.core.ipynb 24
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
# %% ../../nbs/30_text.core.ipynb 26
def replace_all_caps(t):
    "Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
    def _sub(m):
        lead, word = m.group(1), m.group(2)
        # Single-character tokens (e.g. "I") are lowered without the marker
        marker = f'{TK_UP} ' if len(word) > 1 else ''
        return f'{lead}{marker}{word.lower()}'
    return _re_all_caps.sub(_sub, t)
# %% ../../nbs/30_text.core.ipynb 28
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
# %% ../../nbs/30_text.core.ipynb 30
def replace_maj(t):
    "Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
    def _sub(m):
        lead, word = m.group(1), m.group(2)
        # Single-character tokens are lowered without the marker
        marker = f'{TK_MAJ} ' if len(word) > 1 else ''
        return f'{lead}{marker}{word.lower()}'
    return _re_maj.sub(_sub, t)
# %% ../../nbs/30_text.core.ipynb 32
def lowercase(t, add_bos=True, add_eos=False):
    "Converts `t` to lowercase, optionally wrapping with BOS/EOS markers"
    prefix = f'{BOS} ' if add_bos else ''
    suffix = f' {EOS}' if add_eos else ''
    return prefix + t.lower().strip() + suffix
# %% ../../nbs/30_text.core.ipynb 33
def replace_space(t):
    "Replace embedded spaces in a token with unicode line char to allow for split/join"
    return '▁'.join(t.split(' '))
# %% ../../nbs/30_text.core.ipynb 34
# Default special tokens, preprocessing rules (applied to raw text before
# tokenization) and postprocessing rules (applied to each produced token)
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
                            replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
# %% ../../nbs/30_text.core.ipynb 37
class BaseTokenizer():
    "Basic tokenizer that just splits on spaces"
    def __init__(self, split_char=' ', **kwargs):
        # Extra kwargs are accepted (and ignored) for a uniform tokenizer API
        self.split_char = split_char
    def __call__(self, items):
        # Lazily yield the token list for each item
        for txt in items: yield txt.split(self.split_char)
# %% ../../nbs/30_text.core.ipynb 39
class SpacyTokenizer():
    "Spacy tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, buf_sz=5000):
        import spacy
        from spacy.symbols import ORTH
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        nlp = spacy.blank(lang)
        # Register each special token so spaCy never splits it apart
        for w in self.special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
        self.pipe,self.buf_sz = nlp.pipe,buf_sz
    def __call__(self, items):
        # Stream texts through spaCy's pipe in batches of `buf_sz`, yielding token lists
        return (L(doc).attrgot('text') for doc in self.pipe(map(str,items), batch_size=self.buf_sz))
# %% ../../nbs/30_text.core.ipynb 40
WordTokenizer = SpacyTokenizer
# %% ../../nbs/30_text.core.ipynb 42
class TokenizeWithRules:
    "A wrapper around `tok` which applies `rules`, then tokenizes, then applies `post_rules`"
    def __init__(self, tok, rules=None, post_rules=None):
        self.rules = L(ifnone(rules, defaults.text_proc_rules))
        # Compose the post-rules into one callable applied to each token
        self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
        self.tok = tok
    def __call__(self, batch):
        # Pre-rules run on each raw text, then the tokenizer, then post-rules per token
        return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
# %% ../../nbs/30_text.core.ipynb 46
@delegates(TokenizeWithRules)
def tokenize1(text, tok, **kwargs):
    "Call `TokenizeWithRules` with a single text"
    # Wrap `text` in a one-element batch and return the first (only) result
    return first(TokenizeWithRules(tok=tok, **kwargs)([text]))
# %% ../../nbs/30_text.core.ipynb 48
def parallel_tokenize(items, tok=None, rules=None, n_workers=defaults.cpus, **kwargs):
    "Calls optional `setup` on `tok` before launching `TokenizeWithRules` using `parallel_gen`"
    if tok is None: tok = WordTokenizer()
    # Tokenizers such as SentencePiece need a training pass over `items` first
    if hasattr(tok, 'setup'): tok.setup(items, rules)
    return parallel_gen(TokenizeWithRules, items, tok=tok, rules=rules, n_workers=n_workers, **kwargs)
# %% ../../nbs/30_text.core.ipynb 54
# Filenames used to cache token counts and per-file lengths next to tokenized output
fn_counter_pkl = 'counter.pkl'
fn_lengths_pkl = 'lengths.pkl'
# %% ../../nbs/30_text.core.ipynb 55
def _tokenize_files(func, files, path, output_dir=None, output_names=None, n_workers=defaults.cpus, rules=None, tok=None,
                    encoding='utf8', skip_if_exists=False):
    "Tokenize text `files` in parallel using `n_workers`, writing results via `func(i, output_dir)`"
    if tok is None: tok = WordTokenizer()
    output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
    if skip_if_exists and output_dir.exists(): return output_dir
    output_dir.mkdir(exist_ok=True)
    if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
    # Prepend the file-reading step so each Path is read then run through the rules
    rules = partial(Path.read_text, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
    lengths,counter = {},Counter()
    # Loop variable renamed to `toks` so it no longer shadows the `tok` tokenizer argument
    for i,toks in parallel_tokenize(files, tok, rules, n_workers=n_workers):
        out = func(i,output_dir)
        out.mk_write(' '.join(toks), encoding=encoding)
        lengths[str(files[i].relative_to(path))] = len(toks)
        counter.update(toks)
    # Cache lengths and token counts for later use (e.g. by `Tokenizer.from_folder`)
    save_pickle(output_dir/fn_lengths_pkl, lengths)
    save_pickle(output_dir/fn_counter_pkl, counter)
    return output_dir
# %% ../../nbs/30_text.core.ipynb 56
@delegates(_tokenize_files)
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, skip_if_exists=True, **kwargs):
    "Tokenize text files in `path` in parallel using `n_workers`"
    path,extensions = Path(path),ifnone(extensions, ['.txt'])
    files = get_files(path, extensions=extensions, recurse=True, folders=folders)
    # Mirror the input tree: each output file keeps its path relative to `path`
    def _f(i,output_dir): return output_dir/files[i].relative_to(path)
    return _tokenize_files(_f, files, path, skip_if_exists=skip_if_exists, **kwargs)
# %% ../../nbs/30_text.core.ipynb 58
@delegates(_tokenize_files)
def tokenize_files(files, path, output_dir, output_names=None, **kwargs):
    "Tokenize text `files` in parallel using `n_workers`"
    if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
    # Each output file goes to the explicitly supplied (or derived) name
    def _f(i,output_dir): return output_dir/output_names[i]
    return _tokenize_files(_f, files, path, output_dir=output_dir, **kwargs)
# %% ../../nbs/30_text.core.ipynb 60
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
# %% ../../nbs/30_text.core.ipynb 62
def tokenize_texts(texts, n_workers=defaults.cpus, rules=None, tok=None):
    "Tokenize `texts` in parallel using `n_workers`"
    rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
    # Results come back (index, tokens); sort by index, keep the tokens
    outputs = L(parallel_tokenize(texts, tok=tok, rules=rules, n_workers=n_workers)
               ).sorted().itemgot(1)
    return outputs
# %% ../../nbs/30_text.core.ipynb 63
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
                tok=None, tok_text_col="text"):
    "Tokenize texts in `df[text_cols]` in parallel using `n_workers` and stores them in `df[tok_text_col]`"
    text_cols = [df.columns[c] if isinstance(c, int) else c for c in L(text_cols)]
    #mark_fields defaults to False if there is one column of texts, True if there are multiple
    if mark_fields is None: mark_fields = len(text_cols)>1
    rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
    texts = _join_texts(df[text_cols], mark_fields=mark_fields)
    # Results come back (index, tokens); sort by index, keep the tokens
    outputs = L(parallel_tokenize(texts, tok, rules, n_workers=n_workers)
               ).sorted().itemgot(1)
    # Keep all non-text columns, add the tokenized column and per-row token lengths
    other_cols = df.columns[~df.columns.isin(text_cols)]
    res = df[other_cols].copy()
    res[tok_text_col] = outputs
    res[f'{tok_text_col}_length'] = [len(o) for o in outputs]
    return res,Counter(outputs.concat())
# %% ../../nbs/30_text.core.ipynb 65
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
                 tok=None, header='infer', chunksize=50000):
    "Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
    # Accept plain strings: `ifnone` evaluates its default eagerly, so
    # `fname.parent`/`fname.stem` below need `fname` to already be a Path.
    fname = Path(fname)
    df = pd.read_csv(fname, header=header, chunksize=chunksize)
    outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    cnt = Counter()
    for i,dfp in enumerate(df):
        out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
                            mark_fields=mark_fields, tok=tok)
        out.text = out.text.str.join(' ')
        # First chunk creates the file and writes the header; later chunks append
        out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
        cnt.update(c)
    # Persist the aggregated token counter next to the tokenized csv
    save_pickle(outname.with_suffix('.pkl'), cnt)
# %% ../../nbs/30_text.core.ipynb 66
def load_tokenized_csv(fname):
    "Utility function to quickly load a tokenized csv and the corresponding counter"
    fname = Path(fname)
    out = pd.read_csv(fname)
    # Columns between the first and last hold space-joined tokens; re-split them
    for txt_col in out.columns[1:-1]:
        out[txt_col] = tuple(out[txt_col].str.split(' '))
    return out,load_pickle(fname.with_suffix('.pkl'))
# %% ../../nbs/30_text.core.ipynb 71
class Tokenizer(Transform):
    "Provides a consistent `Transform` interface to tokenizers operating on `DataFrame`s and folders"
    input_types = (str, list, L, tuple, Path)
    def __init__(self, tok, rules=None, counter=None, lengths=None, mode=None, sep=' '):
        # Accept a tokenizer class or instance; instantiate if needed
        if isinstance(tok,type): tok=tok()
        store_attr('tok,counter,lengths,mode,sep')
        self.rules = defaults.text_proc_rules if rules is None else rules
    @classmethod
    @delegates(tokenize_df, keep=True)
    def from_df(cls, text_cols, tok=None, rules=None, sep=' ', **kwargs):
        "Build a `Tokenizer` that operates on the `text_cols` of a DataFrame (tokenization happens in `setups`)"
        if tok is None: tok = WordTokenizer()
        res = cls(tok, rules=rules, mode='df')
        res.kwargs,res.train_setup = merge({'tok': tok}, kwargs),False
        res.text_cols,res.sep = text_cols,sep
        # Resolve the output column name from `tokenize_df`'s own default
        default_val = inspect.signature(tokenize_df).parameters['tok_text_col'].default
        res.tok_text_col = kwargs.get('tok_text_col', default_val)
        return res
    @classmethod
    @delegates(tokenize_folder, keep=True)
    def from_folder(cls, path, tok=None, rules=None, **kwargs):
        "Tokenize the folder up-front, then build a `Tokenizer` backed by the cached output"
        path = Path(path)
        if tok is None: tok = WordTokenizer()
        output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
        res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
                  lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
        res.path,res.output_dir = path,output_dir
        return res
    def setups(self, dsets):
        # In 'df' mode, tokenize the whole DataFrame once and cache counter/lengths
        if not self.mode == 'df' or not isinstance(dsets.items, pd.DataFrame): return
        dsets.items,count = tokenize_df(dsets.items, self.text_cols, rules=self.rules, **self.kwargs)
        if self.counter is None: self.counter = count
        if self.lengths is None: self.lengths = dsets.items[f'{self.tok_text_col}_length'].values
        return dsets
    def encodes(self, o:Path):
        # For folder mode, read back the pre-tokenized file when it exists
        if self.mode=='folder' and str(o).startswith(str(self.path)):
            tok = self.output_dir/o.relative_to(self.path)
            return L(tok.read_text(encoding='UTF-8').split(' '))
        else: return self._tokenize1(o.read_text())
    def encodes(self, o:str): return self._tokenize1(o)
    def _tokenize1(self, o): return first(self.tok([compose(*self.rules)(o)]))
    def get_lengths(self, items):
        "Token lengths for `items` if cached, else None"
        if self.lengths is None: return None
        if self.mode == 'df':
            if isinstance(items, pd.DataFrame) and f'{self.tok_text_col}_length' in items.columns:
                return items[f'{self.tok_text_col}_length'].values
        if self.mode == 'folder':
            try:
                res = [self.lengths[str(Path(i).relative_to(self.path))] for i in items]
                if len(res) == len(items): return res
            except: return None
    def decodes(self, o): return TitledStr(self.sep.join(o))
# %% ../../nbs/30_text.core.ipynb 75
# European language codes (used below to pick SentencePiece character coverage)
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
            "it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
# %% ../../nbs/30_text.core.ipynb 76
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
    "SentencePiece tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
                 model_type='unigram', char_coverage=None, cache_dir='tmp'):
        try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
        except ImportError:
            raise Exception('sentencepiece module is missing: run `pip install sentencepiece!=0.1.90,!=0.1.91`')
        self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
        self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
        # European languages get near-total character coverage; others (e.g. CJK) slightly less
        self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        # Load an existing model if given, otherwise `setup` will train one
        if sp_model is None: self.tok = None
        else:
            self.tok = SentencePieceProcessor()
            self.tok.Load(str(sp_model))
        os.makedirs(self.cache_dir, exist_ok=True)
    def _get_vocab_sz(self, raw_text_path):
        "Heuristic vocab size: ~1/4 of the distinct whitespace tokens, rounded up to a multiple of 8, capped at `max_vocab_sz`"
        cnt = Counter()
        with open(raw_text_path, 'r') as f:
            for line in f.readlines():
                cnt.update(line.split())
            if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
        res = len(cnt)//4
        while res%8 != 0: res+=1
        return max(res,29)
    def train(self, raw_text_path):
        "Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
        from sentencepiece import SentencePieceTrainer
        vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
        spec_tokens = ['\u2581'+s for s in self.special_toks]
        SentencePieceTrainer.Train(" ".join([
            f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
            f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
            f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1 --minloglevel=2",
            f"--user_defined_symbols={','.join(spec_tokens)} --hard_vocab_limit=false"]))
        # The temporary raw-text dump is no longer needed once the model is trained
        raw_text_path.unlink()
        return self.cache_dir/'spm.model'
    def setup(self, items, rules=None):
        "Train on `items` (after applying `rules`) unless a model is already loaded"
        from sentencepiece import SentencePieceProcessor
        if rules is None: rules = []
        if self.tok is not None: return {'sp_model': self.sp_model}
        # Dump the processed texts to a temp file for SentencePiece to consume
        raw_text_path = self.cache_dir/'texts.out'
        with open(raw_text_path, 'w') as f:
            for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
                f.write(f'{t}\n')
        sp_model = self.train(raw_text_path)
        self.tok = SentencePieceProcessor()
        self.tok.Load(str(sp_model))
        return {'sp_model': sp_model}
    def __call__(self, items):
        if self.tok is None: self.setup(items)
        for t in items: yield self.tok.EncodeAsPieces(t)
| 17,521 | 45.110526 | 121 | py |
fastai | fastai-master/fastai/text/learner.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/37_text.learner.ipynb.
# %% ../../nbs/37_text.learner.ipynb 1
from __future__ import annotations
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
from ..callback.progress import *
# %% auto 0
__all__ = ['match_embeds', 'load_ignore_keys', 'clean_raw_keys', 'load_model_text', 'TextLearner', 'decode_spec_tokens',
'LMLearner', 'language_model_learner', 'text_classifier_learner', 'show_results', 'plot_top_losses']
# %% ../../nbs/37_text.learner.ipynb 8
def match_embeds(
    old_wgts:dict, # Embedding weights
    old_vocab:list, # Vocabulary of corpus used for pre-training
    new_vocab:list # Current corpus vocabulary
) -> dict:
    "Convert the embedding in `old_wgts` to go from `old_vocab` to `new_vocab`."
    enc = old_wgts['0.encoder.weight']
    dec_bias = old_wgts.get('1.decoder.bias', None)
    mean_vec = enc.mean(0)  # fallback embedding for words absent from the old vocab
    new_enc = enc.new_zeros((len(new_vocab), enc.size(1)))
    if dec_bias is not None:
        mean_bias = dec_bias.mean(0)
        new_dec_bias = dec_bias.new_zeros((len(new_vocab),))
    old_o2i = old_vocab.o2i if hasattr(old_vocab, 'o2i') else {w:i for i,w in enumerate(old_vocab)}
    # Copy each new-vocab row from its old position, or use the mean for unknowns
    for row, word in enumerate(new_vocab):
        old_idx = old_o2i.get(word, -1)
        new_enc[row] = enc[old_idx] if old_idx >= 0 else mean_vec
        if dec_bias is not None:
            new_dec_bias[row] = dec_bias[old_idx] if old_idx >= 0 else mean_bias
    old_wgts['0.encoder.weight'] = new_enc
    if '0.encoder_dp.emb.weight' in old_wgts: old_wgts['0.encoder_dp.emb.weight'] = new_enc.clone()
    old_wgts['1.decoder.weight'] = new_enc.clone()  # tied decoder gets its own copy
    if dec_bias is not None: old_wgts['1.decoder.bias'] = new_dec_bias
    return old_wgts
# %% ../../nbs/37_text.learner.ipynb 12
def _get_text_vocab(dls:DataLoaders) -> list:
    "Get vocabulary from `DataLoaders`"
    vocab = dls.vocab
    # When several vocabs are stored (an `L`), keep the first — presumably the text one
    if isinstance(vocab, L): vocab = vocab[0]
    return vocab
# %% ../../nbs/37_text.learner.ipynb 13
def load_ignore_keys(
    model, # Model architecture
    wgts:dict # Model weights
) -> tuple:
    "Load `wgts` in `model` ignoring the names of the keys, just taking parameters in order"
    sd = model.state_dict()
    # Pair entries positionally: name mismatches between the dicts are ignored on purpose
    for dst_key, src_key in zip(sd.keys(), wgts.keys()):
        sd[dst_key].data = wgts[src_key].data.clone()
    return model.load_state_dict(sd)
# %% ../../nbs/37_text.learner.ipynb 14
def _rm_module(n:str):
t = n.split('.')
for i in range(len(t)-1, -1, -1):
if t[i] == 'module':
t.pop(i)
break
return '.'.join(t)
# %% ../../nbs/37_text.learner.ipynb 15
#For previous versions compatibility, remove for release
def clean_raw_keys(wgts:dict):
keys = list(wgts.keys())
for k in keys:
t = k.split('.module')
if f'{_rm_module(k)}_raw' in keys: del wgts[k]
return wgts
# %% ../../nbs/37_text.learner.ipynb 16
#For previous versions compatibility, remove for release
def load_model_text(
file:str, # File name of saved text model
model, # Model architecture
opt:Optimizer, # `Optimizer` used to fit the model
with_opt:bool=None, # Enable to load `Optimizer` state
device:int|str|torch.device=None, # Sets the device, uses 'cpu' if unspecified
strict:bool=True # Whether to strictly enforce the keys of `file`s state dict match with the model `Module.state_dict`
):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(clean_raw_keys(model_state), strict=strict)
if hasopt and ifnone(with_opt,True):
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# %% ../../nbs/37_text.learner.ipynb 17
@delegates(Learner.__init__)
class TextLearner(Learner):
    "Basic class for a `Learner` in NLP."
    def __init__(self,
        dls:DataLoaders, # Text `DataLoaders`
        model, # A standard PyTorch model
        alpha:float=2., # Param for `RNNRegularizer`
        beta:float=1., # Param for `RNNRegularizer`
        moms:tuple=(0.8,0.7,0.8), # Momentum for `Cosine Annealing Scheduler`
        **kwargs
    ):
        super().__init__(dls, model, moms=moms, **kwargs)
        self.add_cbs(rnn_cbs())  # RNN-specific callbacks (regularization etc.)

    def save_encoder(self,
        file:str # Filename for `Encoder`
    ):
        "Save the encoder to `file` in the model directory"
        if rank_distrib(): return # don't save if child proc
        encoder = get_model(self.model)[0]
        # Unwrap DataParallel/DistributedDataParallel before saving
        if hasattr(encoder, 'module'): encoder = encoder.module
        torch.save(encoder.state_dict(), join_path_file(file, self.path/self.model_dir, ext='.pth'))

    def load_encoder(self,
        file:str, # Filename of the saved encoder
        device:int|str|torch.device=None # Device used to load, defaults to `dls` device
    ):
        "Load the encoder `file` from the model directory, optionally ensuring it's on `device`"
        encoder = get_model(self.model)[0]
        if device is None: device = self.dls.device
        if hasattr(encoder, 'module'): encoder = encoder.module
        distrib_barrier()
        wgts = torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device)
        encoder.load_state_dict(clean_raw_keys(wgts))
        self.freeze()  # start with only the head trainable
        return self

    def load_pretrained(self,
        wgts_fname:str, # Filename of saved weights
        vocab_fname:str, # Saved vocabulary filename in pickle format
        model=None # Model to load parameters from, defaults to `Learner.model`
    ):
        "Load a pretrained model and adapt it to the data vocabulary."
        old_vocab = load_pickle(vocab_fname)
        new_vocab = _get_text_vocab(self.dls)
        distrib_barrier()
        wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
        if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
        # Remap the embedding rows from the pretrained vocab to the current one
        wgts = match_embeds(wgts, old_vocab, new_vocab)
        load_ignore_keys(self.model if model is None else model, clean_raw_keys(wgts))
        self.freeze()
        return self

    #For previous versions compatibility. Remove at release
    @delegates(load_model_text)
    def load(self,
        file:str, # Filename of saved model
        with_opt:bool=None, # Enable to load `Optimizer` state
        device:int|str|torch.device=None, # Device used to load, defaults to `dls` device
        **kwargs
    ):
        if device is None: device = self.dls.device
        if self.opt is None: self.create_opt()
        file = join_path_file(file, self.path/self.model_dir, ext='.pth')
        load_model_text(file, self.model, self.opt, device=device, **kwargs)
        return self
# %% ../../nbs/37_text.learner.ipynb 26
def decode_spec_tokens(tokens):
    "Decode the special tokens in `tokens`"
    new_toks,rule,arg = [],None,None
    for t in tokens:
        if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t
        elif rule is None: new_toks.append(t)
        elif rule == TK_MAJ:
            # xxmaj w -> W (capitalize first letter only)
            new_toks.append(t[:1].upper() + t[1:].lower())
            rule = None
        elif rule == TK_UP:
            # xxup w -> W fully uppercased
            new_toks.append(t.upper())
            rule = None
        elif arg is None:
            # First token after xxrep/xxwrep is the repetition count
            try: arg = int(t)
            except ValueError: rule = None
        else:
            # xxrep n c -> the char repeated n times; xxwrep n w -> the word n times
            if rule == TK_REP: new_toks.append(t * arg)
            else: new_toks += [t] * arg
            # Bug fix: reset the state machine, otherwise every *following*
            # token would also be expanded by the stale rule/arg.
            rule,arg = None,None
    return new_toks
# %% ../../nbs/37_text.learner.ipynb 28
class LMLearner(TextLearner):
    "Add functionality to `TextLearner` when dealing with a language model"
    def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, no_bar=False,
                decoder=decode_spec_tokens, only_last_word=False):
        "Return `text` and the `n_words` that come after"
        self.model.reset()
        idxs = idxs_all = self.dls.test_dl([text]).items[0].to(self.dls.device)
        if no_unk: unk_idx = self.dls.vocab.index(UNK)
        for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
            with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
            res = preds[0][-1]  # probabilities for the next token
            if no_unk: res[unk_idx] = 0.
            if min_p is not None:
                # Zero out low-probability tokens, unless that would remove them all
                if (res >= min_p).float().sum() == 0:
                    warn(f"There is no item with probability >= {min_p}, try a lower value.")
                else: res[res < min_p] = 0.
            # temperature < 1 sharpens, > 1 flattens the sampling distribution
            if temperature != 1.: res.pow_(1 / temperature)
            idx = torch.multinomial(res, 1).item()
            idxs = idxs_all = torch.cat([idxs_all, idxs.new([idx])])
            # Optionally feed only the latest token back in (stateful model keeps context)
            if only_last_word: idxs = idxs[-1][None]

        num = self.dls.train_ds.numericalize
        tokens = [num.vocab[i] for i in idxs_all if num.vocab[i] not in [BOS, PAD]]
        sep = self.dls.train_ds.tokenizer.sep
        return sep.join(decoder(tokens))

    @delegates(Learner.get_preds)
    def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# %% ../../nbs/37_text.learner.ipynb 33
from .models.core import _model_meta
# %% ../../nbs/37_text.learner.ipynb 34
# NOTE(review): duplicates the annotated `_get_text_vocab` defined earlier in this module
def _get_text_vocab(dls):
    vocab = dls.vocab
    if isinstance(vocab, L): vocab = vocab[0]
    return vocab
# %% ../../nbs/37_text.learner.ipynb 35
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., backwards=False, pretrained=True, pretrained_fnames=None, **kwargs):
    "Create a `Learner` with a language model from `dls` and `arch`."
    vocab = _get_text_vocab(dls)
    model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
    meta = _model_meta[arch]
    learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
    url = 'url_bwd' if backwards else 'url'
    if pretrained or pretrained_fnames:
        if pretrained_fnames is not None:
            # Explicit (weights, vocab) filenames relative to the model directory
            fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
        else:
            if url not in meta:
                warn("There are no pretrained weights for that architecture yet!")
                return learn
            # Download the pretrained weights + vocab for this architecture
            model_path = untar_data(meta[url] , c_key='model')
            try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
            except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
        learn = learn.load_pretrained(*fnames)
    return learn
# %% ../../nbs/37_text.learner.ipynb 42
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, backwards=False, pretrained=True, drop_mult=0.5, n_out=None,
                            lin_ftrs=None, ps=None, max_len=72*20, y_range=None, **kwargs):
    "Create a `Learner` with a text classifier from `dls` and `arch`."
    vocab = _get_text_vocab(dls)
    if n_out is None: n_out = get_c(dls)
    assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
    model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config, y_range=y_range,
                                drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
    meta = _model_meta[arch]
    learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
    url = 'url_bwd' if backwards else 'url'
    if pretrained:
        if url not in meta:
            warn("There are no pretrained weights for that architecture yet!")
            return learn
        # Download pretrained weights + vocab, loading them into the encoder only
        model_path = untar_data(meta[url], c_key='model')
        try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
        except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
        learn = learn.load_pretrained(*fnames, model=learn.model[0])
        learn.freeze()
    return learn
# %% ../../nbs/37_text.learner.ipynb 46
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
    "Show language-model input/target/prediction triplets as a DataFrame"
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    for i,l in enumerate(['input', 'target']):
        ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
    display_df(pd.DataFrame(ctxs))
    return ctxs
# %% ../../nbs/37_text.learner.ipynb 47
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Show text-classification results, truncating each text to `trunc_at` characters"
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
    # Delegate the per-row rendering to the generic implementation
    ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
    display_df(pd.DataFrame(ctxs))
    return ctxs
# %% ../../nbs/37_text.learner.ipynb 48
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
    "Display the worst-predicted texts with target, prediction, probability and loss"
    rows = get_empty_df(len(samples))
    samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
    for i,l in enumerate(['input', 'target']):
        rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
    # Extend each output tuple with the max predicted probability and the loss value
    outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
    for i,l in enumerate(['predicted', 'probability', 'loss']):
        rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
    display_df(pd.DataFrame(rows))
| 13,867 | 44.618421 | 133 | py |
fastai | fastai-master/fastai/text/data.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/31_text.data.ipynb.
# %% ../../nbs/31_text.data.ipynb 1
from __future__ import annotations
from ..torch_basics import *
from ..data.all import *
from .core import *
# %% auto 0
__all__ = ['pad_input', 'reverse_text', 'make_vocab', 'TensorText', 'LMTensorText', 'Numericalize', 'LMDataLoader', 'show_batch',
'Pad_Input', 'pad_chunk', 'pad_input_chunk', 'Pad_Chunk', 'SortedDL', 'TextBlock', 'TextDataLoaders']
# %% ../../nbs/31_text.data.ipynb 6
def reverse_text(x): return x.flip(0)
# %% ../../nbs/31_text.data.ipynb 10
def make_vocab(count, min_freq=3, max_vocab=60000, special_toks=None):
    "Create a vocab of `max_vocab` size from `Counter` `count` with items present more than `min_freq`"
    vocab = [o for o,c in count.most_common(max_vocab) if c >= min_freq]
    special_toks = ifnone(special_toks, defaults.text_spec_tok)
    for o in reversed(special_toks): #Make sure all special tokens are in the vocab
        if o in vocab: vocab.remove(o)
        vocab.insert(0, o)
    vocab = vocab[:max_vocab]
    # Pad the vocab with dummy tokens so its size is a multiple of 8 (efficient for
    # mixed-precision embedding layers). `-len(vocab) % 8` is 0 when already aligned;
    # the previous `8 - len(vocab) % 8` wrongly appended 8 useless tokens in that case.
    return vocab + ['xxfake' for i in range(-len(vocab) % 8)]
# %% ../../nbs/31_text.data.ipynb 13
class TensorText(TensorBase): pass  # marker subclass: tensor of numericalized token ids
class LMTensorText(TensorText): pass  # marker subclass: token ids used as language-model input
TensorText.__doc__ = "Semantic type for a tensor representing text"
LMTensorText.__doc__ = "Semantic type for a tensor representing text in language modeling"
# %% ../../nbs/31_text.data.ipynb 14
class Numericalize(Transform):
    "Reversible transform of tokenized texts to numericalized ids"
    def __init__(self, vocab=None, min_freq=3, max_vocab=60000, special_toks=None):
        store_attr('vocab,min_freq,max_vocab,special_toks')
        # o2i maps token -> id; defaultdict(int) sends unknown tokens to id 0
        self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})
    def setups(self, dsets):
        "Build the vocab (if not given) from the training set's token counts."
        if dsets is None: return
        if self.vocab is None:
            # Prefer a precomputed counter on the dataset; otherwise count tokens directly
            count = dsets.counter if getattr(dsets, 'counter', None) is not None else Counter(p for o in dsets for p in o)
            if self.special_toks is None and hasattr(dsets, 'special_toks'):
                self.special_toks = dsets.special_toks
            self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab, special_toks=self.special_toks)
        # Exclude the 'xxfake' padding tokens so they can never be produced by encoding
        self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})
    def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))  # tokens -> id tensor
    def decodes(self, o): return L(self.vocab[o_] for o_ in o)  # id tensor -> tokens
# %% ../../nbs/31_text.data.ipynb 22
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
# %% ../../nbs/31_text.data.ipynb 23
def _get_tokenizer(ds):
    "Return the `Tokenizer` attached to `ds` (directly or inside a transform list), or None."
    candidate = getattr(ds, 'tokenizer', None)
    if isinstance(candidate, Tokenizer):
        return candidate
    if isinstance(candidate, (list, L)):
        return next((t for t in candidate if isinstance(t, Tokenizer)), None)
# %% ../../nbs/31_text.data.ipynb 24
def _get_lengths(ds):
    "Return cached token lengths for `ds.items` via its tokenizer, or None when unavailable."
    tok = _get_tokenizer(ds)
    return None if tok is None else tok.get_lengths(ds.items)
# %% ../../nbs/31_text.data.ipynb 25
#TODO: add backward
@delegates()
class LMDataLoader(TfmdDL):
    "A `DataLoader` suitable for language modeling"
    def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
        # `items` lazily extracts the text tensor of each sample (first tuple element)
        self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
        self.seq_len = seq_len
        if lens is None: lens = _get_lengths(dataset)
        if lens is None: lens = [len(o) for o in self.items]  # fall back to computing lengths
        self.lens = ReindexCollection(lens, idxs=self.items.idxs)
        # The "-1" is to allow for final label, we throw away the end that's less than bs
        corpus = round_multiple(sum(lens)-1, bs, round_down=True)
        self.bl = corpus//bs #bl stands for batch length
        self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
        self.last_len = self.bl - (self.n_batches-1)*seq_len  # length of the final (possibly short) sequence
        self.make_chunks()
        super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
        self.n = self.n_batches*bs  # total number of items this loader yields
    def make_chunks(self): self.chunks = Chunks(self.items, self.lens)  # view of the corpus as one stream
    def shuffle_fn(self,idxs):
        "Shuffle the underlying texts (not the batch indices) between epochs."
        self.items.shuffle()
        self.make_chunks()
        return idxs
    def create_item(self, seq):
        "Return the (input, target) pair for flat index `seq`; target is input shifted by one."
        if seq is None: seq = 0
        if seq>=self.n: raise IndexError
        # Last batch row may be shorter than seq_len
        sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
        # Row seq%bs of the corpus stream, offset by how many batches we've consumed
        st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
        txt = self.chunks[st : st+sl+1]  # +1 so the shifted target has sl tokens too
        return LMTensorText(txt[:-1]),txt[1:]
    @delegates(TfmdDL.new)
    def new(self, dataset=None, seq_len=None, **kwargs):
        "Create a new loader, reusing the cached lengths when the dataset is unchanged."
        lens = self.lens.coll if dataset is None else None
        seq_len = self.seq_len if seq_len is None else seq_len
        return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)
# %% ../../nbs/31_text.data.ipynb 35
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Show a batch of text samples as a notebook DataFrame, truncating texts to `trunc_at` chars."
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    if trunc_at is not None: samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
    # Generic implementation fills in each row; we only handle the text-specific display
    ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
    display_df(pd.DataFrame(ctxs))
    return ctxs
# %% ../../nbs/31_text.data.ipynb 36
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Show a language-model batch: truncate both input and target, then reuse the text display."
    samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
    # trunc_at=None because truncation was already applied above
    return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
# %% ../../nbs/31_text.data.ipynb 39
class Pad_Input(ItemTransform):
    "Collate transform that pads selected tuple fields of each sample to the batch max length."
    def encodes(self,samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
        "Function that collect `samples` and adds padding"
        self.pad_idx = pad_idx  # remembered so `decodes` can strip the same padding value
        pad_fields = L(pad_fields)
        # Maximum length over the batch, computed separately for each padded field
        max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
        if backwards: pad_first = not pad_first
        def _f(field_idx, x):
            if field_idx not in pad_fields: return x
            idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
            # (an unused `sl = slice(...)` local from an older implementation was removed here)
            pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
            x1 = torch.cat([pad, x] if pad_first else [x, pad])
            if backwards: x1 = x1.flip(0)
            return retain_type(x1, x)
        return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
    def decodes(self, o:TensorText):
        "Strip padding tokens from `o` (default pad id 1 when `encodes` never ran)."
        pad_idx = self.pad_idx if hasattr(self,'pad_idx') else 1
        return o[o != pad_idx]
pad_input=Pad_Input()
# %% ../../nbs/31_text.data.ipynb 44
def pad_chunk(x,pad_idx=1, pad_first=True, seq_len=72, pad_len=10):
    "Pad `x` to length `pad_len` with `pad_idx`, splitting the padding into `seq_len`-sized chunks plus a remainder"
    l = pad_len - x.shape[0]  # total amount of padding required
    # Renamed locals: the original code shadowed the function's own name `pad_chunk`.
    chunk_pad = x.new_zeros((l//seq_len) * seq_len) + pad_idx  # whole seq_len-sized chunks of padding
    rest_pad = x.new_zeros(l % seq_len) + pad_idx              # remainder padding (< seq_len)
    x1 = torch.cat([chunk_pad, x, rest_pad]) if pad_first else torch.cat([x, chunk_pad, rest_pad])
    return retain_type(x1, x)
# %% ../../nbs/31_text.data.ipynb 47
@delegates(pad_chunk)
def pad_input_chunk(samples, n_inp=1,**kwargs):
    "Pad `samples` by adding padding by chunks of size `seq_len`"
    # Longest input across the batch, considering all n_inp input fields
    max_len = max(len(s[i]) for s in samples for i in range(n_inp))
    def _pad_sample(s):
        padded = [pad_chunk(s[i], pad_len=max_len, **kwargs) for i in range(n_inp)]
        return (*padded, *s[n_inp:])
    return [_pad_sample(s) for s in samples]
# %% ../../nbs/31_text.data.ipynb 52
class Pad_Chunk(DisplayedTransform):
    "Pad `samples` by adding padding by chunks of size `seq_len`"
    def __init__(self, pad_idx=1, pad_first=True, seq_len=72,decode=True,**kwargs):
        # Bug fix: the old `store_attr('pad_idx, pad_first, seq_len,seq_len')` listed
        # seq_len twice and never stored `decode`, so `decodes` tested the inherited
        # (always truthy) `Transform.decode` method instead of the user's flag.
        # The flag is stored as `_decode` to avoid shadowing that inherited method.
        store_attr('pad_idx,pad_first,seq_len')
        self._decode = decode
        super().__init__(**kwargs)
    def before_call(self, b):
        "Set `self.max_len` before encodes"
        self.max_len = max([x.shape[0] for xs in b for x in xs if isinstance(x,TensorText)])
    def __call__(self, b, **kwargs):
        self.before_call(b)
        return super().__call__(tuple(b), **kwargs)
    def encodes(self, x:TensorText):
        return pad_chunk(x,pad_idx=self.pad_idx, pad_first=self.pad_first, seq_len=self.seq_len, pad_len=self.max_len)
    def decodes(self, o:TensorText):
        "Strip padding on decode unless the transform was created with decode=False."
        return o[o != self.pad_idx] if self._decode else o
# %% ../../nbs/31_text.data.ipynb 56
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
    "A `DataLoader` that goes throught the item in the order given by `sort_func`"
    def __init__(self, dataset, sort_func=None, res=None, **kwargs):
        super().__init__(dataset, **kwargs)
        self.sort_func = _default_sort if sort_func is None else sort_func
        # When using the default (length) key, reuse cached lengths if the tokenizer has them
        if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
        self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
        # Index of the longest sample: placed first when shuffling so OOMs happen early
        if len(self.res) > 0: self.idx_max = np.argmax(self.res)
    def get_idxs(self):
        "Deterministic order: sort all items by key descending (used for validation)."
        idxs = super().get_idxs()
        if self.shuffle: return idxs
        return sorted(idxs, key=lambda i: self.res[i], reverse=True)
    def shuffle_fn(self,idxs):
        "Shuffle while keeping items of similar key size together to minimize padding."
        idxs = np.random.permutation(len(self.dataset))
        # Move the largest sample to the front so the first batch has max memory use
        idx_max = np.where(idxs==self.idx_max)[0][0]
        idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
        # Sort within megabatches of 50 batches so nearby items have similar lengths
        sz = self.bs*50
        chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
        chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
        sort_idx = np.concatenate(chunks)
        # Shuffle the batches themselves, keeping first (largest) and last (smallest) fixed
        sz = self.bs
        batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=int)
        sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
        return iter(sort_idx)
    @delegates(TfmdDL.new)
    def new(self, dataset=None, **kwargs):
        "Create a new loader, reusing the cached sort keys when the dataset is unchanged."
        if 'val_res' in kwargs and kwargs['val_res'] is not None: res = kwargs['val_res']
        else: res = self.res if dataset is None else None
        return super().new(dataset=dataset, res=res, **kwargs)
# %% ../../nbs/31_text.data.ipynb 62
class TextBlock(TransformBlock):
    "A `TransformBlock` for texts"
    @delegates(Numericalize.__init__)
    def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, backwards=False, **kwargs):
        # Pipeline: tokenize then numericalize (optionally reversing tokens for backward LMs)
        type_tfms = [tok_tfm, Numericalize(vocab, **kwargs)]
        if backwards: type_tfms += [reverse_text]
        # LM data uses the streaming LMDataLoader; classification uses length-sorted batches
        return super().__init__(type_tfms=type_tfms,
                                dl_type=LMDataLoader if is_lm else SortedDL,
                                dls_kwargs={'seq_len': seq_len} if is_lm else {'before_batch': Pad_Chunk(seq_len=seq_len)})
    @classmethod
    @delegates(Tokenizer.from_df, keep=True)
    def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` from a dataframe using `text_cols`"
        return cls(Tokenizer.from_df(text_cols, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
                   backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
    @classmethod
    @delegates(Tokenizer.from_folder, keep=True)
    def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` from a `path`"
        return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
                   backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
# %% ../../nbs/31_text.data.ipynb 71
class TextDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for NLP problems"
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
                    tok_tfm=None, seq_len=72, splitter=None, backwards=False, **kwargs):
        "Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
        if splitter is None:
            # Split by folder name unless a random validation percentage was requested
            splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
        blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len, backwards, tok=tok_tfm)]
        if not is_lm: blocks.append(CategoryBlock(vocab=vocab))  # LM has no label block
        get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
        dblock = DataBlock(blocks=blocks,
                           get_items=get_items,
                           splitter=splitter,
                           get_y=None if is_lm else parent_label)
        return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
                text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, tok_text_col="text", seq_len=72, backwards=False, **kwargs):
        "Create from `df` in `path` with `valid_pct`"
        blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len, backwards, tok=tok_tfm)]
        if y_block is None and not is_lm:
            # Infer label block type: multi-label when several label columns are given
            blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
        if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
        # Split randomly unless a boolean validation column was provided
        splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
        dblock = DataBlock(blocks=blocks,
                           get_x=ColReader(tok_text_col),
                           get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
                           splitter=splitter)
        return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
    @classmethod
    def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
        "Create from `csv` file in `path/csv_fname`"
        df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
        return cls.from_df(df, path=path, **kwargs)
# Expose from_df's keyword arguments on from_csv's signature
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)
| 14,582 | 49.811847 | 141 | py |
fastai | fastai-master/fastai/text/models/core.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../../nbs/33_text.models.core.ipynb.
# %% ../../../nbs/33_text.models.core.ipynb 1
from __future__ import annotations
from ...data.all import *
from ..core import *
from .awdlstm import *
# %% auto 0
__all__ = ['LinearDecoder', 'SequentialRNN', 'get_language_model', 'SentenceEncoder', 'masked_concat_pool',
'PoolingLinearClassifier', 'get_text_classifier']
# %% ../../../nbs/33_text.models.core.ipynb 5
_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD,
'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split,
'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split},}
# Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER,
# 'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split,
# 'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split},
# TransformerXL: {'hid_name':'d_model',
# 'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split,
# 'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}}
# %% ../../../nbs/33_text.models.core.ipynb 7
class LinearDecoder(Module):
    "To go on top of a RNNCore module and create a Language Model."
    initrange=0.1  # uniform init range for the decoder weights
    def __init__(self,
        n_out:int, # Number of output channels
        n_hid:int, # Number of features in encoder last layer output
        output_p:float=0.1, # Dropout probability applied to the decoder's input (the encoder output)
        tie_encoder:nn.Module=None, # If module is supplied will tie decoder weight to `tie_encoder.weight`
        bias:bool=True # If `False` the layer will not learn additive bias
    ):
        self.decoder = nn.Linear(n_hid, n_out, bias=bias)
        self.decoder.weight.data.uniform_(-self.initrange, self.initrange)
        self.output_dp = RNNDropout(output_p)
        if bias: self.decoder.bias.data.zero_()
        if tie_encoder: self.decoder.weight = tie_encoder.weight  # weight tying with the embedding
    def forward(self, input):
        "Return (decoded scores, raw encoder output, dropped-out encoder output)."
        dp_inp = self.output_dp(input)
        # The extra outputs are used by RNN regularization callbacks (AR/TAR)
        return self.decoder(dp_inp), input, dp_inp
# %% ../../../nbs/33_text.models.core.ipynb 10
class SequentialRNN(nn.Sequential):
    "A sequential module that passes the reset call to its children."
    def reset(self):
        "Call `reset` on every child module that provides it."
        for child in self.children():
            getcallable(child, 'reset')()
# %% ../../../nbs/33_text.models.core.ipynb 12
def get_language_model(
    arch, # Function or class that can generate a language model architecture
    vocab_sz:int, # Size of the vocabulary
    config:dict=None, # Model configuration dictionary
    drop_mult:float=1. # Multiplicative factor to scale all dropout probabilities in `config`
) -> SequentialRNN: # Language model with `arch` encoder and linear decoder
    "Create a language model from `arch` and its `config`."
    meta = _model_meta[arch]
    config = ifnone(config, meta['config_lm']).copy()  # copy so the shared default dict is never mutated
    # Scale every dropout probability (keys ending in '_p') by drop_mult
    for k in config.keys():
        if k.endswith('_p'): config[k] *= drop_mult
    # Pop decoder-specific options before passing the rest to the encoder arch
    tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])
    init = config.pop('init') if 'init' in config else None
    encoder = arch(vocab_sz, **config)
    enc = encoder.encoder if tie_weights else None  # embedding layer to tie the decoder weights to
    decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)
    model = SequentialRNN(encoder, decoder)
    return model if init is None else model.apply(init)
# %% ../../../nbs/33_text.models.core.ipynb 17
def _pad_tensor(t:Tensor, bs:int) -> Tensor:
if t.size(0) < bs: return torch.cat([t, t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
# %% ../../../nbs/33_text.models.core.ipynb 18
class SentenceEncoder(Module):
    "Create an encoder over `module` that can process a full sentence."
    def __init__(self,
        bptt:int, # Backpropagation through time
        module:nn.Module, # A module that can process up to [`bs`, `bptt`] tokens
        pad_idx:int=1, # Padding token id
        max_len:int=None # Maximal output length
    ):
        store_attr('bptt,module,pad_idx,max_len')
    def reset(self): getcallable(self.module, 'reset')()
    def forward(self, input):
        "Process `input` in `bptt`-sized windows; return (outputs, padding mask)."
        bs,sl = input.size()
        self.reset()
        mask = input == self.pad_idx
        outs,masks = [],[]
        for i in range(0, sl, self.bptt):
            #Note: this expects that sequence really begins on a round multiple of bptt
            # Rows that are all-padding in this window are skipped to save compute
            real_bs = (input[:,i] != self.pad_idx).long().sum()
            o = self.module(input[:real_bs,i: min(i+self.bptt, sl)])
            # Only keep windows within `max_len` of the end of the sequence
            if self.max_len is None or sl-i <= self.max_len:
                outs.append(o)
                masks.append(mask[:,i: min(i+self.bptt, sl)])
        # Re-pad skipped rows with zeros so all windows concatenate to full batch size
        outs = torch.cat([_pad_tensor(o, bs) for o in outs], dim=1)
        mask = torch.cat(masks, dim=1)
        return outs,mask
# %% ../../../nbs/33_text.models.core.ipynb 21
def masked_concat_pool(
    output:Tensor, # Output of sentence encoder
    mask:Tensor, # Boolean mask as returned by sentence encoder
    bptt:int # Backpropagation through time
) -> Tensor: # Concatenation of [last_hidden, max_pool, avg_pool]
    "Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]"
    lens = output.shape[1] - mask.long().sum(dim=1)  # number of non-padding positions per row
    last_lens = mask[:,-bptt:].long().sum(dim=1)     # padding count in the final bptt window
    # Average over non-padding positions only
    avg_pool = output.masked_fill(mask[:, :, None], 0).sum(dim=1)
    avg_pool.div_(lens.type(avg_pool.dtype)[:,None])
    # -inf fill so padding never wins the max
    max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
    # -last_lens-1 indexes the last non-padding position (assumes padding sits at the end)
    x = torch.cat([output[torch.arange(0, output.size(0)),-last_lens-1], max_pool, avg_pool], 1) #Concat pooling.
    return x
# %% ../../../nbs/33_text.models.core.ipynb 24
class PoolingLinearClassifier(Module):
    "Create a linear classifier with pooling"
    def __init__(self,
        dims:list, # List of hidden sizes for MLP as `int`s
        ps:list, # List of dropout probabilities as `float`s
        bptt:int, # Backpropagation through time
        y_range:tuple=None # Tuple of (low, high) output value bounds
    ):
        if len(ps) != len(dims)-1: raise ValueError("Number of layers and dropout values do not match.")
        # ReLU between hidden layers; no activation after the final (output) layer
        acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]
        layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]
        if y_range is not None: layers.append(SigmoidRange(*y_range))  # bound outputs for regression
        self.layers = nn.Sequential(*layers)
        self.bptt = bptt
    def forward(self, input):
        "Expect (encoder output, mask); return (predictions, raw output, raw output)."
        out,mask = input
        x = masked_concat_pool(out, mask, self.bptt)
        x = self.layers(x)
        # Duplicate raw output matches the 3-tuple contract of RNN regularization callbacks
        return x, out, out
# %% ../../../nbs/33_text.models.core.ipynb 27
def get_text_classifier(
    arch:callable, # Function or class that can generate a language model architecture
    vocab_sz:int, # Size of the vocabulary
    n_class:int, # Number of classes
    seq_len:int=72, # Backpropagation through time
    config:dict=None, # Encoder configuration dictionary
    drop_mult:float=1., # Multiplicative factor to scale all dropout probabilities in `config`
    lin_ftrs:list=None, # List of hidden sizes for classifier head as `int`s
    ps:list=None, # List of dropout probabilities for classifier head as `float`s
    pad_idx:int=1, # Padding token id
    max_len:int=72*20, # Maximal output length for `SentenceEncoder`
    y_range:tuple=None # Tuple of (low, high) output value bounds
):
    "Create a text classifier from `arch` and its `config`, maybe `pretrained`"
    meta = _model_meta[arch]
    # Merge user config over the architecture's default classification config
    cfg = meta['config_clas'].copy()
    cfg.update(ifnone(config, {}))
    config = cfg
    for k in config.keys():
        if k.endswith('_p'): config[k] *= drop_mult  # scale every dropout probability
    if lin_ftrs is None: lin_ftrs = [50]
    if ps is None: ps = [0.1]*len(lin_ftrs)
    # Head input is 3x hidden size because of concat pooling [last, max, avg]
    layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
    ps = [config.pop('output_p')] + ps
    init = config.pop('init') if 'init' in config else None
    encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx, max_len=max_len)
    model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps, bptt=seq_len, y_range=y_range))
    return model if init is None else model.apply(init)
| 8,308 | 47.30814 | 113 | py |
fastai | fastai-master/fastai/text/models/awdlstm.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../../../nbs/32_text.models.awdlstm.ipynb.
# %% ../../../nbs/32_text.models.awdlstm.ipynb 1
from __future__ import annotations
from ...data.all import *
from ..core import *
# %% auto 0
__all__ = ['awd_lstm_lm_config', 'awd_lstm_clas_config', 'dropout_mask', 'RNNDropout', 'WeightDropout', 'EmbeddingDropout',
'AWD_LSTM', 'awd_lstm_lm_split', 'awd_lstm_clas_split']
# %% ../../../nbs/32_text.models.awdlstm.ipynb 7
def dropout_mask(
    x:Tensor, # Source tensor, output will be of the same type as `x`
    sz:list, # Size of the dropout mask as `int`s
    p:float # Dropout probability
) -> Tensor: # Multiplicative dropout mask
    "Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
    keep = x.new_empty(*sz).bernoulli_(1-p)
    # Scale surviving elements by 1/(1-p) so the expected value is preserved
    return keep.div_(1-p)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 9
class RNNDropout(Module):
    "Dropout with probability `p` that is consistent on the seq_len dimension."
    def __init__(self, p:float=0.5): self.p=p
    def forward(self, x):
        # Identity when dropout is disabled or we are in eval mode
        if self.p == 0. or not self.training: return x
        # One mask per (batch, feature) position, broadcast over the seq_len dim
        mask = dropout_mask(x.data, (x.size(0), 1, *x.shape[2:]), self.p)
        return x * mask
# %% ../../../nbs/32_text.models.awdlstm.ipynb 13
class WeightDropout(Module):
    "A module that wraps another layer in which some weights will be replaced by 0 during training."
    def __init__(self,
        module:nn.Module, # Wrapped module
        weight_p:float, # Weight dropout probability
        layer_names:str|MutableSequence='weight_hh_l0' # Name(s) of the parameters to apply dropout to
    ):
        self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)
        for layer in self.layer_names:
            #Makes a copy of the weights of the selected layers.
            w = getattr(self.module, layer)
            delattr(self.module, layer)
            # The raw weight is the trained parameter; the wrapped module keeps a
            # (possibly dropped-out) clone under the original attribute name.
            self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
            setattr(self.module, layer, w.clone())
        if isinstance(self.module, (nn.RNNBase, nn.modules.rnn.RNNBase)):
            # flatten_parameters would break because the weight is no longer a Parameter
            self.module.flatten_parameters = self._do_nothing
    def _setweights(self):
        "Apply dropout to the raw weights."
        for layer in self.layer_names:
            raw_w = getattr(self, f'{layer}_raw')
            if self.training: w = F.dropout(raw_w, p=self.weight_p)
            else: w = raw_w.clone()
            setattr(self.module, layer, w)
    def forward(self, *args):
        self._setweights()
        with warnings.catch_warnings():
            # To avoid the warning that comes because the weights aren't flattened.
            warnings.simplefilter("ignore", category=UserWarning)
            return self.module(*args)
    def reset(self):
        "Restore un-dropped weights and reset the wrapped module if it supports it."
        for layer in self.layer_names:
            raw_w = getattr(self, f'{layer}_raw')
            setattr(self.module, layer, raw_w.clone())
        if hasattr(self.module, 'reset'): self.module.reset()
    def _do_nothing(self): pass  # stand-in for the disabled flatten_parameters
# %% ../../../nbs/32_text.models.awdlstm.ipynb 15
class EmbeddingDropout(Module):
    "Apply dropout with probability `embed_p` to an embedding layer `emb`."
    def __init__(self,
        emb:nn.Embedding, # Wrapped embedding layer
        embed_p:float # Embdedding layer dropout probability
    ):
        self.emb,self.embed_p = emb,embed_p
    def forward(self, words, scale=None):
        "Look up `words` with rows of the embedding matrix randomly zeroed during training."
        if self.training and self.embed_p != 0:
            # Drop whole embedding rows (one mask value per vocabulary entry)
            size = (self.emb.weight.size(0),1)
            mask = dropout_mask(self.emb.weight.data, size, self.embed_p)
            masked_embed = self.emb.weight * mask
        else: masked_embed = self.emb.weight
        if scale: masked_embed.mul_(scale)
        return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm,
                           self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 17
class AWD_LSTM(Module):
    "AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"
    initrange=0.1  # uniform init range for the embedding weights
    def __init__(self,
        vocab_sz:int, # Size of the vocabulary
        emb_sz:int, # Size of embedding vector
        n_hid:int, # Number of features in hidden state
        n_layers:int, # Number of LSTM layers
        pad_token:int=1, # Padding token id
        hidden_p:float=0.2, # Dropout probability for hidden state between layers
        input_p:float=0.6, # Dropout probability for LSTM stack input
        embed_p:float=0.1, # Embedding layer dropout probabillity
        weight_p:float=0.5, # Hidden-to-hidden wight dropout probability for LSTM layers
        bidir:bool=False # If set to `True` uses bidirectional LSTM layers
    ):
        store_attr('emb_sz,n_hid,n_layers,pad_token')
        self.bs = 1  # current batch size; hidden state is resized lazily when it changes
        self.n_dir = 2 if bidir else 1
        self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
        self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
        # Last layer outputs emb_sz features so weights can be tied with the embedding
        self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,
                                                 bidir, weight_p, l) for l in range(n_layers)])
        self.encoder.weight.data.uniform_(-self.initrange, self.initrange)
        self.input_dp = RNNDropout(input_p)
        self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
        self.reset()
    def forward(self, inp:Tensor, from_embeds:bool=False):
        "Run the LSTM stack; `from_embeds=True` means `inp` is already embedded."
        bs,sl = inp.shape[:2] if from_embeds else inp.shape
        if bs!=self.bs: self._change_hidden(bs)  # adapt carried hidden state to new batch size
        output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))
        new_hidden = []
        for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
            output, new_h = rnn(output, self.hidden[l])
            new_hidden.append(new_h)
            if l != self.n_layers - 1: output = hid_dp(output)  # no dropout after the last layer
        # Detach so BPTT does not propagate across batches
        self.hidden = to_detach(new_hidden, cpu=False, gather=False)
        return output
    def _change_hidden(self, bs):
        "Resize the carried hidden state to batch size `bs`."
        self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]
        self.bs = bs
    def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
        "Return one of the inner rnn"
        rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)
        return WeightDropout(rnn, weight_p)
    def _one_hidden(self, l):
        "Return one hidden state"
        nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
        return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))
    def _change_one_hidden(self, l, bs):
        "Grow (zero-pad) or shrink (truncate) layer `l`'s hidden state to batch size `bs`."
        if self.bs < bs:
            nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
            return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])
        if self.bs > bs: return (self.hidden[l][0][:,:bs].contiguous(), self.hidden[l][1][:,:bs].contiguous())
        return self.hidden[l]
    def reset(self):
        "Reset the hidden states"
        [r.reset() for r in self.rnns if hasattr(r, 'reset')]
        self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
# %% ../../../nbs/32_text.models.awdlstm.ipynb 22
def awd_lstm_lm_split(model):
    "Split a RNN `model` in groups for differential learning rates."
    # One group per LSTM layer (with its inter-layer dropout)...
    rnn_groups = [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
    # ...plus a final group with the embedding (tied to the decoder) and the decoder itself
    head = nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])
    return L(rnn_groups + [head]).map(params)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 23
# Default AWD-LSTM hyperparameters for language modeling (weights tied between embedding and decoder)
awd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,
                       hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 24
def awd_lstm_clas_split(model):
    "Split a RNN `model` in groups for differential learning rates."
    enc = model[0].module  # the AWD_LSTM wrapped inside the SentenceEncoder
    groups = [nn.Sequential(enc.encoder, enc.encoder_dp)]
    groups += [nn.Sequential(rnn, dp) for rnn, dp in zip(enc.rnns, enc.hidden_dps)]
    groups.append(model[1])  # classifier head
    return L(groups).map(params)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 25
# Default AWD-LSTM hyperparameters for text classification (higher dropouts than the LM config)
awd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,
                         hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
| 8,529 | 45.868132 | 131 | py |
fastai | fastai-master/dev_nbs/course/crappify.py | from fastai.basics import *
from PIL import Image, ImageDraw, ImageFont
def resize_to(img, targ_sz, use_min=False):
    "Compute a new (w, h) so that `img`'s largest (or smallest, with `use_min`) side equals `targ_sz`."
    w, h = img.size
    base = min(w, h) if use_min else max(w, h)
    scale = targ_sz / base
    return int(w * scale), int(h * scale)
class crappifier():
    "Save a low-quality copy of each high-res image: downsized, JPEG-compressed, with a random quality number drawn on it."
    def __init__(self, path_lr, path_hr):
        self.path_lr = path_lr  # destination root for crappified images
        self.path_hr = path_hr  # source root of high-res images
    def __call__(self, fn):
        dest = self.path_lr/fn.relative_to(self.path_hr)
        dest.parent.mkdir(parents=True, exist_ok=True)
        img = Image.open(fn)
        targ_sz = resize_to(img, 96, use_min=True)
        # Bug fix: bare `BILINEAR` is not defined by this file's imports; use the PIL constant.
        img = img.resize(targ_sz, resample=Image.BILINEAR).convert('RGB')
        w,h = img.size
        q = random.randint(10,70)  # JPEG quality; also drawn onto the image as text
        ImageDraw.Draw(img).text((random.randint(0,w//2),random.randint(0,h//2)), str(q), fill=(255,255,255))
        img.save(dest, quality=q)
| 859 | 33.4 | 109 | py |
fastai | fastai-master/nbs/dltest.py | from fastai.torch_basics import *
from fastai.data.load import *
class RandDL(DataLoader):
    "A `DataLoader` that yields random floats, ending the stream ~5% of the time."
    def create_item(self, s):
        val = random.random()
        if val < 0.95:
            return val
        return stop()
# Smoke test for multi-worker DataLoader: print the length of each batch produced.
if __name__ == "__main__":
    # It can be reproduced in Linux by uncommenting this line
    # multiprocessing.set_start_method('spawn')
    print("start main ...")
    dl = RandDL(bs=4, num_workers=2, drop_last=True)
    print(L(dl).map(len))
| 434 | 28 | 61 | py |
fastai | fastai-master/nbs/examples/train_imdbclassifier.py | from fastai.basics import *
from fastai.callback.all import *
from fastai.distributed import *
from fastprogress import fastprogress
from fastai.callback.mixup import *
from fastcore.script import *
from fastai.text.all import *
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def pr(s):
    "Print `s` only from the rank-0 process (avoids duplicated output in distributed training)."
    if rank_distrib() == 0:
        print(s)
@call_parse
def main(
    lr: Param("base Learning rate", float)=1e-2,
    bs: Param("Batch size", int)=64,
    epochs:Param("Number of epochs", int)=5,
    fp16: Param("Use mixed precision training", store_true)=False,
    dump: Param("Print model; don't train", int)=0,
    runs: Param("Number of times to repeat training", int)=1,
):
    "Training of IMDB classifier."
    # rank0_first: only rank 0 downloads; other ranks wait then reuse the cache
    path = rank0_first(untar_data, URLs.IMDB)
    dls = TextDataLoaders.from_folder(path, bs=bs, valid='test')
    for run in range(runs):
        pr(f'Rank[{rank_distrib()}] Run: {run}; epochs: {epochs}; lr: {lr}; bs: {bs}')
        learn = rank0_first(text_classifier_learner, dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)
        if dump: pr(learn.model); exit()
        if fp16: learn = learn.to_fp16()
        # Workaround: In PyTorch 1.4, need to set DistributedDataParallel() with find_unused_parameters=True,
        # to avoid a crash that only happens in distributed mode of text_classifier_learner.fine_tune()
        if num_distrib() > 1 and torch.__version__.startswith("1.4"): DistributedTrainer.fup = True
        with learn.distrib_ctx(): # distributed traing requires "-m fastai.launch"
            learn.fine_tune(epochs, lr)
| 1,574 | 37.414634 | 109 | py |
fastai | fastai-master/nbs/examples/train_wt2.py | from fastai.basics import *
from fastai.text.all import *
from fastai.callback.all import *
from fastcore.script import *
def istitle(line):
    "True if `line` is a top-level wikitext heading of the form ' = Title = '."
    # A match object is truthy/None; no need to build a list with findall and count it.
    return re.search(r'^ = [^=]* = $', line) is not None
def read_file(filename):
    "Read a wikitext file and split it into articles, each returned as a list of tokens."
    articles = L()
    with open(filename, encoding='utf8') as f:
        lines = f.readlines()
    current_article = ''
    for i,line in enumerate(lines):
        current_article += line
        # A new article starts when a blank ' \n' line is followed by a top-level title
        if i < len(lines)-2 and lines[i+1] == ' \n' and istitle(lines[i+2]):
            articles.append(current_article.split(' '))
            current_article = ''
    articles.append(current_article.split(' '))  # don't forget the final article
    return articles
def get_data(bs, sl):
    "Build WikiText-2 language-model DataLoaders with batch size `bs` and sequence length `sl`; return (dls, vocab)."
    path = untar_data(URLs.WIKITEXT_TINY)
    train = LM_Dataset(read_file(path/'train.txt'), bs=bs, seq_len=sl, shuffle=True)
    valid = LM_Dataset(read_file(path/'valid.txt'), bs=bs, seq_len=sl)
    # Vocab is built from training tokens only
    count = Counter([p for t in train.ds for p in t])
    vocab = make_vocab(count)
    train_ds = TfmdLists(train, tfms=Numericalize(vocab), as_item=False, wrap_l=False)
    valid_ds = TfmdLists(valid, tfms=Numericalize(vocab), as_item=False, wrap_l=False)
    train_dl = TfmdDL(train_ds, bs=bs, sampler=LM_Sampler(train), num_workers=8)
    valid_dl = TfmdDL(valid_ds, bs=bs, sampler=LM_Sampler(valid), num_workers=8)
    return DataLoaders(train_dl, valid_dl),vocab
@call_parse
def main(bs:Param("Batch size", int)=104,
         sl:Param("Sequence length", int)=72):
    "Train an AWD-LSTM language model on WikiText-2 from scratch."
    dls,vocab = get_data(bs, sl)
    config = awd_lstm_lm_config.copy()
    config.update({'input_p': 0.6, 'output_p': 0.4, 'weight_p': 0.5, 'embed_p': 0.1, 'hidden_p': 0.2})
    model = get_language_model(AWD_LSTM, len(vocab), config=config)
    opt_func = partial(Adam, wd=0.1, eps=1e-7)
    alpha,beta = (3,2)  # AR/TAR regularization coefficients
    cbs = [MixedPrecision(clip=0.1), ModelResetter, RNNRegularizer(alpha, beta)]
    learn = Learner(model, dls, loss_func=CrossEntropyLossFlat(), opt_func=opt_func, cbs=cbs, metrics=[accuracy, Perplexity()])
    learn.fit_one_cycle(90, 5e-3, moms=(0.8,0.7,0.8), div=10)
| 2,024 | 42.085106 | 127 | py |
fastai | fastai-master/nbs/examples/dataloader_spawn.py | #!/usr/bin/env python
# coding: utf-8
from fastai.vision.all import *
def get_data(url, presize, resize):
path = untar_data(url)
#print(Normalize.from_stats(*imagenet_stats))
return DataBlock(
blocks=(ImageBlock, CategoryBlock), get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label, item_tfms=Resize(presize),
batch_tfms=aug_transforms(min_scale=0.5, size=resize),
).dataloaders(path, bs=128)
def block(ni, nf): return ConvLayer(ni, nf, stride=2)
def get_model():
return nn.Sequential(
block(3, 16),
block(16, 32),
block(32, 64),
block(64, 128),
block(128, 256),
nn.AdaptiveAvgPool2d(1),
Flatten(),
nn.Linear(256, dls.c))
def get_learner(dls, m):
return Learner(dls, m, loss_func=nn.CrossEntropyLoss(), metrics=accuracy
)
if __name__ == "__main__":
multiprocessing.set_start_method('spawn')
dls = get_data(URLs.IMAGENETTE_160, 160, 128)
resnet_model = get_model()
learn = get_learner(dls, resnet_model)
learn.lr_find()
| 1,124 | 28.605263 | 76 | py |
fastai | fastai-master/nbs/examples/migrating_ignite.py | # The fastai DataLoader is a drop-in replacement for Pytorch's;
# no code changes are required other than changing the import line
from fastai.data.load import DataLoader
import torch
from torch import nn
from torch.optim import SGD
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
device = "cpu"
if torch.cuda.is_available(): device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
trainer.logger = setup_logger("trainer")
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
evaluator.logger = setup_logger("evaluator")
desc = "ITERATION - loss: {:.2f}"
pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
pbar.desc = desc.format(engine.state.output)
pbar.update(log_interval)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll))
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
tqdm.write(
"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll))
pbar.n = pbar.last_print_n = 0
@trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def log_time(engine):
tqdm.write(
"{} took {} seconds".format(trainer.last_event_name.name, trainer.state.times[trainer.last_event_name.name]))
trainer.run(train_loader, max_epochs=epochs)
| 3,837 | 39.829787 | 121 | py |
fastai | fastai-master/nbs/examples/migrating_catalyst.py | # The fastai DataLoader is a drop-in replacement for Pytorch's;
# no code changes are required other than changing the import line
from fastai.data.load import DataLoader
import os,torch
from torch.nn import functional as F
from catalyst import dl
from catalyst.data.cv import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.utils import metrics
model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32),
"valid": DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32),
}
class CustomRunner(dl.Runner):
def predict_batch(self, batch): return self.model(batch[0].to(self.device).view(batch[0].size(0), -1))
def _handle_batch(self, batch):
x, y = batch
y_hat = self.model(x.view(x.size(0), -1))
loss = F.cross_entropy(y_hat, y)
accuracy01, accuracy03 = metrics.accuracy(y_hat, y, topk=(1, 3))
self.batch_metrics.update(
{"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
)
if self.is_train_loader:
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
| 1,319 | 34.675676 | 109 | py |
fastai | fastai-master/nbs/examples/mnist_items.py | from fastai.vision.all import *
items = get_image_files(untar_data(URLs.MNIST))
splits = GrandparentSplitter(train_name='training', valid_name='testing')(items)
tds = Datasets(items, [PILImageBW.create, [parent_label, Categorize()]], splits=splits)
if __name__ == '__main__':
data = tds.dataloaders(bs=256, after_item=[ToTensor(), IntToFloatTensor()]).cuda()
learn = vision_learner(data, resnet18)
learn.fit_one_cycle(1, 1e-2)
| 442 | 35.916667 | 87 | py |
fastai | fastai-master/nbs/examples/migrating_fastai.py | from fastai.vision.all import *
from torchvision import datasets, transforms
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
batch_size,test_batch_size = 256,512
epochs,lr = 1,1e-2
kwargs = {'num_workers': 1, 'pin_memory': True}
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transform),
batch_size=test_batch_size, shuffle=True, **kwargs)
if __name__ == '__main__':
data = DataLoaders(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
learn.fit_one_cycle(epochs, lr)
| 1,183 | 38.466667 | 87 | py |
fastai | fastai-master/nbs/examples/train_imagenette.py | from fastai.basics import *
from fastai.vision.all import *
from fastai.callback.all import *
from fastai.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
from fastai.vision.models.xresnet import *
from fastai.callback.mixup import *
from fastcore.script import *
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def pr(s):
if rank_distrib()==0: print(s)
def get_dls(size, woof, pct_noise, bs, sh=0., workers=None):
assert pct_noise in [0,5,50], '`pct_noise` must be 0,5 or 50.'
if size<=224: path = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
else : path = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
source = untar_data(path)
workers = ifnone(workers,min(8,num_cpus()))
blocks=(ImageBlock, CategoryBlock)
tfms = [RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
batch_tfms = [Normalize.from_stats(*imagenet_stats)]
if sh: batch_tfms.append(RandomErasing(p=0.3, max_count=3, sh=sh))
csv_file = 'noisy_imagewoof.csv' if woof else 'noisy_imagenette.csv'
inp = pd.read_csv(source/csv_file)
dblock = DataBlock(blocks=blocks,
splitter=ColSplitter(),
get_x=ColReader('path', pref=source),
get_y=ColReader(f'noisy_labels_{pct_noise}'),
item_tfms=tfms,
batch_tfms=batch_tfms)
return dblock.dataloaders(inp, path=source, bs=bs, num_workers=workers)
@call_parse
def main(
woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
pct_noise:Param("Percentage of noise in training set (0,5,50%)", int)=0,
lr: Param("Learning rate", float)=1e-2,
size: Param("Size (px: 128,192,256)", int)=128,
sqrmom:Param("sqr_mom", float)=0.99,
mom: Param("Momentum", float)=0.9,
eps: Param("Epsilon", float)=1e-6,
wd: Param("Weight decay", float)=1e-2,
epochs:Param("Number of epochs", int)=5,
bs: Param("Batch size", int)=64,
mixup: Param("Mixup", float)=0.,
opt: Param("Optimizer (adam,rms,sgd,ranger)", str)='ranger',
arch: Param("Architecture", str)='xresnet50',
sh: Param("Random erase max proportion", float)=0.,
sa: Param("Self-attention", store_true)=False,
sym: Param("Symmetry for self-attention", int)=0,
beta: Param("SAdam softplus beta", float)=0.,
act_fn:Param("Activation function", str)='Mish',
fp16: Param("Use mixed precision training", store_true)=False,
pool: Param("Pooling method", str)='AvgPool',
dump: Param("Print model; don't train", int)=0,
runs: Param("Number of times to repeat training", int)=1,
meta: Param("Metadata (ignored)", str)='',
workers: Param("Number of workers", int)=None,
):
"""Training of Imagenette. Call with `python -m fastai.launch` for distributed training.
Note for testing -- the following should result in accuracy top-5 of 75%+:
`python train_imagenette.py --fp16 --epochs 3 --size 192`"""
if opt=='adam' : opt_func = partial(Adam, mom=mom, sqr_mom=sqrmom, eps=eps)
elif opt=='rms' : opt_func = partial(RMSprop, sqr_mom=sqrmom)
elif opt=='sgd' : opt_func = partial(SGD, mom=mom)
elif opt=='ranger': opt_func = partial(ranger, mom=mom, sqr_mom=sqrmom, eps=eps, beta=beta)
dls = rank0_first(get_dls, size, woof, pct_noise, bs, sh=sh, workers=workers)
pr(f'pct_noise: {pct_noise}; epochs: {epochs}; lr: {lr}; size: {size}; sqrmom: {sqrmom}; mom: {mom}; eps: {eps}')
m,act_fn,pool = [globals()[o] for o in (arch,act_fn,pool)]
for run in range(runs):
pr(f'Run: {run}')
learn = Learner(dls, m(n_out=10, act_cls=act_fn, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())
if dump: pr(learn.model); exit()
if fp16: learn = learn.to_fp16()
cbs = MixUp(mixup) if mixup else []
n_gpu = torch.cuda.device_count()
# Both context managers work fine for single GPU too
ctx = learn.distrib_ctx if num_distrib() and n_gpu else learn.parallel_ctx
with ctx(): learn.fit_flat_cos(epochs, lr, wd=wd, cbs=cbs)
| 4,191 | 46.636364 | 117 | py |
fastai | fastai-master/nbs/examples/migrating_lightning.py | # The fastai DataLoader is a drop-in replacement for Pytorch's;
# no code changes are required other than changing the import line
from fastai.data.load import DataLoader
import os,torch
from torch.nn import functional as F
from torchvision.datasets import MNIST
from torchvision import transforms
from pytorch_lightning.core.lightning import LightningModule
class LitModel(LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x): return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_idx):
x,y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
return {'loss': loss}
def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=0.001)
def train_dataloader(self):
dataset = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor())
return DataLoader(dataset, batch_size=32, num_workers=4, shuffle=True)
def validation_step(self, batch, batch_idx):
x,y = batch
y_hat = self(x)
return {'val_loss': F.cross_entropy(y_hat, y)}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
print(avg_loss)
return {'val_loss': avg_loss}
def val_dataloader(self):
# TODO: do a real train/val split
dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
loader = DataLoader(dataset, batch_size=32, num_workers=4)
return loader
| 1,622 | 35.066667 | 97 | py |
fastai | fastai-master/nbs/examples/distrib.py | from fastai.vision.all import *
from fastai.distributed import *
from fastai.vision.models.xresnet import *
path = rank0_first(untar_data, URLs.IMAGEWOOF_320)
dls = DataBlock(
blocks=(ImageBlock, CategoryBlock),
splitter=GrandparentSplitter(valid_name='val'),
get_items=get_image_files, get_y=parent_label,
item_tfms=[RandomResizedCrop(160), FlipItem(0.5)],
batch_tfms=Normalize.from_stats(*imagenet_stats)
).dataloaders(path, path=path, bs=64)
learn = Learner(dls, xresnet50(n_out=10), metrics=[accuracy,top_k_accuracy]).to_fp16()
with learn.distrib_ctx(): learn.fit_flat_cos(2, 1e-3, cbs=MixUp(0.1))
| 625 | 35.823529 | 86 | py |
fastai | fastai-master/nbs/examples/distrib_pytorch.py | from fastai.vision.all import *
from fastai.distributed import *
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
batch_size,test_batch_size = 256,512
epochs,lr = 5,1e-2
kwargs = {'num_workers': 1, 'pin_memory': True}
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transform),
batch_size=test_batch_size, shuffle=True, **kwargs)
if __name__ == '__main__':
data = DataLoaders(train_loader, test_loader)
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
with learn.distrib_ctx(): learn.fit_one_cycle(epochs, lr)
| 1,275 | 38.875 | 87 | py |
fastai | fastai-master/nbs/examples/migrating_pytorch.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
class Flatten(nn.Module):
def forward(self, x): return x.view(x.size(0), -1)
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx*len(data), len(train_loader.dataset),
100. * batch_idx/len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss,correct = 0,0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss/len(test_loader.dataset), correct, len(test_loader.dataset),
100. * correct/len(test_loader.dataset)))
batch_size,test_batch_size = 256,512
epochs,lr = 1,1e-2
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True}
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transform),
batch_size=test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
if __name__ == '__main__':
for epoch in range(1, epochs+1):
train(model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
| 2,716 | 38.376812 | 81 | py |
fastai | fastai-master/nbs/examples/train_tabular.py | from fastai.basics import *
from fastai.tabular.all import *
from fastai.callback.all import *
from fastai.distributed import *
from fastprogress import fastprogress
from fastai.callback.mixup import *
from fastcore.script import *
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def pr(s):
if rank_distrib()==0: print(s)
def get_dls(path):
dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary",
cat_names = ['workclass', 'education', 'marital-status', 'occupation',
'relationship', 'race'],
cont_names = ['age', 'fnlwgt', 'education-num'],
procs = [Categorify, FillMissing, Normalize])
return dls
@call_parse
def main(
epochs:Param("Number of epochs", int)=5,
fp16: Param("Use mixed precision training", store_true)=False,
dump: Param("Print model; don't train", int)=0,
runs: Param("Number of times to repeat training", int)=1,
):
"Training of Tabular data 'ADULT_SAMPLE'."
path = rank0_first(untar_data,URLs.ADULT_SAMPLE)
dls = get_dls(path)
pr(f'epochs: {epochs};')
for run in range(runs):
pr(f'Run: {run}')
learn = tabular_learner(dls, metrics=accuracy)
if dump: pr(learn.model); exit()
if fp16: learn = learn.to_fp16()
n_gpu = torch.cuda.device_count()
ctx = learn.distrib_ctx if num_distrib() and n_gpu else learn.parallel_ctx
with ctx(): learn.fit_one_cycle(epochs)
| 1,471 | 32.454545 | 84 | py |
fastai | fastai-master/nbs/examples/mnist_blocks.py | from fastai.vision.all import *
splitter = GrandparentSplitter(train_name='training', valid_name='testing')
mnist = DataBlock(blocks=(ImageBlock(PILImageBW), CategoryBlock),
get_items=get_image_files, splitter=splitter, get_y=parent_label)
if __name__ == '__main__':
data = mnist.dataloaders(untar_data(URLs.MNIST), bs=256)
learn = vision_learner(data, resnet18)
learn.fit_one_cycle(1, 1e-2)
| 425 | 34.5 | 83 | py |
FEMNIST_pytorch | FEMNIST_pytorch-master/femnist.py | from torchvision.datasets import MNIST, utils
from PIL import Image
import os.path
import torch
class FEMNIST(MNIST):
"""
This dataset is derived from the Leaf repository
(https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
dataset, grouping examples by writer. Details about Leaf were published in
"LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
"""
resources = [
('https://raw.githubusercontent.com/tao-shen/FEMNIST_pytorch/master/femnist.tar.gz',
'59c65cec646fc57fe92d27d83afdf0ed')]
def __init__(self, root, train=True, transform=None, target_transform=None,
download=False):
super(MNIST, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets, self.users_index = torch.load(os.path.join(self.processed_folder, data_file))
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
img = Image.fromarray(img.numpy(), mode='F')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def download(self):
"""Download the FEMNIST data if it doesn't exist in processed_folder already."""
import shutil
if self._check_exists():
return
utils.makedir_exist_ok(self.raw_folder)
utils.makedir_exist_ok(self.processed_folder)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
utils.download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
# process and save as torch files
print('Processing...')
shutil.move(os.path.join(self.raw_folder, self.training_file), self.processed_folder)
shutil.move(os.path.join(self.raw_folder, self.test_file), self.processed_folder)
| 2,438 | 36.523077 | 110 | py |
LITE | LITE-main/src/tf_dataset_reader.py | import tensorflow as tf
import tensorflow_datasets as tfds
import torch
import torchvision.transforms as T
from PIL import Image
import numpy as np
class TfDatasetReader:
def __init__(self, dataset, task, context_batch_size, target_batch_size, path_to_datasets, image_size, device):
self.dataset = dataset
self.task = task
self.device = device
self.image_size = image_size
self.context_batch_size = context_batch_size
self.target_batch_size = target_batch_size
tf.compat.v1.enable_eager_execution()
train_split = 'train[:{}]'.format(context_batch_size)
ds_context, ds_context_info = tfds.load(
dataset,
split=train_split,
shuffle_files=True,
data_dir=path_to_datasets,
with_info=True
)
self.context_dataset_length = ds_context_info.splits["train"].num_examples
self.context_iterator = ds_context.as_numpy_iterator()
test_split = 'test'
if self.dataset == 'clevr':
test_split = 'validation'
if 'test' in ds_context_info.splits:
# we use the entire test set
ds_target, ds_target_info = tfds.load(
dataset,
split=test_split,
shuffle_files=False,
data_dir=path_to_datasets,
with_info=True)
self.target_dataset_length = ds_target_info.splits["test"].num_examples
else: # there is no test split
# get a second iterator to the training set and skip the training examples
test_split = 'train[{}:]'.format(context_batch_size)
ds_target = tfds.load(
dataset, split=test_split,
shuffle_files=False,
data_dir=path_to_datasets
)
self.target_dataset_length = self.context_dataset_length - context_batch_size
self.target_iterator = ds_target.as_numpy_iterator()
self.transforms = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) # normalize to -1 to 1
])
def get_context_batch(self):
return self._get_batch(self.context_iterator, is_target=False)
def get_target_batch(self):
return self._get_batch(self.target_iterator, is_target=True)
def get_context_dataset_length(self):
return self.context_dataset_length
def get_target_dataset_length(self):
return self.target_dataset_length
def _get_batch(self, iterator, is_target):
batch_size = self.target_batch_size if is_target else self.context_batch_size
images = []
labels = []
for i in range(batch_size):
try:
item = iterator.next()
except StopIteration: # the last batch may be less than batch_size
break
# images
images.append(self._prepare_image(item['image']))
# labels
if self.dataset == "clevr":
labels.append(self._get_clevr_label(item, self.task))
elif self.dataset == 'kitti':
labels.append(self._get_kitti_label(item))
elif self.dataset == 'smallnorb':
if self.task == 'azimuth':
labels.append(item['label_azimuth'])
elif self.task == 'elevation':
labels.append(item['label_elevation'])
else:
raise ValueError("Unsupported smallnorb task.")
elif self.dataset == "dsprites":
labels.append(self._get_dsprites_label(item, self.task))
else:
labels.append(item['label'])
labels = np.array(labels)
images = torch.stack(images)
# move the images and labels to the device
images = images.to(self.device)
labels = torch.from_numpy(labels)
if is_target:
labels = labels.type(torch.LongTensor).to(self.device)
else:
labels = labels.to(self.device)
return images, labels
def _get_kitti_label(self, x):
"""Predict the distance to the closest vehicle."""
# Location feature contains (x, y, z) in meters w.r.t. the camera.
vehicles = np.where(x["objects"]["type"] < 3) # Car, Van, Truck.
vehicle_z = np.take(x["objects"]["location"][:, 2], vehicles)
if len(vehicle_z.shape) > 1:
vehicle_z = np.squeeze(vehicle_z, axis=0)
if vehicle_z.size == 0:
vehicle_z = np.array([1000.0])
else:
vehicle_z = np.append(vehicle_z, [1000.0], axis=0)
dist = np.amin(vehicle_z)
# Results in a uniform distribution over three distances, plus one class for "no vehicle".
thrs = np.array([-100.0, 8.0, 20.0, 999.0])
label = np.amax(np.where((thrs - dist) < 0))
return label
def _get_dsprites_label(self, item, task):
num_classes = 16
if task == "location":
predicted_attribute = 'label_x_position'
num_original_classes = 32
elif task == "orientation":
predicted_attribute = 'label_orientation'
num_original_classes = 40
else:
raise ValueError("Bad dsprites task.")
# at the desired number of classes. This is useful for example for grouping
# together different spatial positions.
class_division_factor = float(num_original_classes) / float(num_classes)
return np.floor((item[predicted_attribute]) / class_division_factor).astype(int)
def _get_clevr_label(self, item, task):
if task == "count":
label = len(item["objects"]["size"]) - 3
elif task == "distance":
dist = np.amin(item["objects"]["pixel_coords"][:, 2])
# These thresholds are uniformly spaced and result in more or less balanced
# distribution of classes, see the resulting histogram:
thrs = np.array([0.0, 8.0, 8.5, 9.0, 9.5, 10.0, 100.0])
label = np.amax(np.where((thrs - dist) < 0))
else:
raise ValueError("Bad clevr task.")
return label
def _prepare_image(self, image):
if self.dataset == "smallnorb" or self.dataset == "dsprites":
# grayscale images where the channel needs to be squeezed to keep PIL happy
image = np.squeeze(image)
if self.dataset == "dsprites": # scale images to be in 0 - 255 range to keep PIL happy
image = image * 255.0
im = Image.fromarray(image)
im = im.resize((self.image_size, self.image_size), Image.LANCZOS)
im = im.convert("RGB")
return self.transforms(im)
| 6,764 | 38.104046 | 115 | py |
LITE | LITE-main/src/efficientnet_utils.py | """
The code in this file is substantially based on the code from "A PyTorch implementation of EfficientNet"
by lukemelas that can be found here: https://github.com/lukemelas/EfficientNet-PyTorch
"""
"""Helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
################################################################################
# Help functions for model architecture
################################################################################
# GlobalParams and BlockArgs: Two namedtuples
# Swish and MemoryEfficientSwish: Two implementations of the method
# round_filters and round_repeats:
# Functions to calculate params for scaling model width and depth ! ! !
# get_width_and_height_from_size and calculate_output_image_size
# drop_connect: A structural design
# get_same_padding_conv2d:
# Conv2dDynamicSamePadding
# Conv2dStaticSamePadding
# get_same_padding_maxPool2d:
# MaxPool2dDynamicSamePadding
# MaxPool2dStaticSamePadding
# It's an additional function, not used in EfficientNet,
# but can be used in other model (such as EfficientDet).
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate',
'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon',
'drop_connect_rate', 'depth_divisor', 'min_depth', 'include_top'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'num_repeat', 'kernel_size', 'stride', 'expand_ratio',
'input_filters', 'output_filters', 'se_ratio', 'id_skip'])
# Set GlobalParams and BlockArgs's defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
# Swish activation function
if hasattr(nn, 'SiLU'):
Swish = nn.SiLU
else:
# For compatibility with old PyTorch versions
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
def round_filters(filters, global_params):
"""Calculate and round number of filters based on width multiplier.
Use width_coefficient, depth_divisor and min_depth of global_params.
Args:
filters (int): Filters number to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new_filters: New filters number after calculating.
"""
multiplier = global_params.width_coefficient
if not multiplier:
return filters
# TODO: modify the params names.
# maybe the names (width_divisor,min_width)
# are more suitable than (depth_divisor,min_depth).
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor # pay attention to this line when using min_depth
# follow the formula transferred from official TensorFlow implementation
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
"""Calculate module's repeat number of a block based on depth multiplier.
Use depth_coefficient of global_params.
Args:
repeats (int): num_repeat to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new repeat: New repeat number after calculating.
"""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
# follow the formula transferred from official TensorFlow implementation
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, p, training):
"""Drop connect.
Args:
input (tensor: BCWH): Input of this structure.
p (float: 0.0~1.0): Probability of drop connection.
training (bool): The running mode.
Returns:
output: Output after drop connection.
"""
assert 0 <= p <= 1, 'p must be in range of [0,1]'
if not training:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
# generate binary_tensor mask according to probability (p for 0, 1-p for 1)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
def get_width_and_height_from_size(x):
"""Obtain height and width from x.
Args:
x (int, tuple or list): Data size.
Returns:
size: A tuple or list (H,W).
"""
if isinstance(x, int):
return x, x
if isinstance(x, list) or isinstance(x, tuple):
return x
else:
raise TypeError()
def calculate_output_image_size(input_image_size, stride):
    """Calculates the output image size when using Conv2dSamePadding with a stride.
    Necessary for static padding. Thanks to mannatsingh for pointing this out.

    Args:
        input_image_size (int, tuple or list): Size of input image (None passes through).
        stride (int, tuple or list): Conv2d operation's stride.
    Returns:
        output_image_size: A list [H, W], or None if input_image_size is None.
    """
    if input_image_size is None:
        return None
    # Normalize the size to (H, W); ints describe square inputs.
    if isinstance(input_image_size, int):
        ih = iw = input_image_size
    elif isinstance(input_image_size, (list, tuple)):
        ih, iw = input_image_size
    else:
        raise TypeError()
    s = stride if isinstance(stride, int) else stride[0]
    # 'SAME' padding produces ceil(input / stride) on each spatial dim.
    return [int(math.ceil(ih / s)), int(math.ceil(iw / s))]
# Note:
# The following 'SamePadding' layers pad so that the output size equals
# ceil(input_size / stride), matching TensorFlow's 'SAME' mode. Despite the
# names, the output only matches the input size when stride == 1.
def get_same_padding_conv2d(image_size=None):
    """Pick a 'SAME'-padding Conv2d variant based on whether the image size is known.

    Static padding (fixed at construction) is necessary for ONNX export;
    dynamic padding computes the pad at every forward pass.

    Args:
        image_size (int or tuple): Size of the image, or None if unknown.
    Returns:
        Conv2dDynamicSamePadding, or Conv2dStaticSamePadding bound to image_size.
    """
    if image_size is None:
        return Conv2dDynamicSamePadding
    return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding for dynamic image sizes.

    The pad amount is recomputed in every forward pass from the actual input
    size, so the spatial output is always ceil(input / stride).
    """
    # For output o == ceil(i/s): total pad p = (o-1)*s + (k-1)*d + 1 - i
    # where i: input size, s: stride, k: kernel size, d: dilation.

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # padding=0: all padding is applied manually in forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        d_h, d_w = self.dilation
        # Target 'SAME' output size, then the padding needed to reach it.
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * d_h + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * d_w + 1 - in_w, 0)
        if extra_h or extra_w:
            # F.pad order: (left, right, top, bottom); odd pads put the extra pixel last.
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding for a fixed image size.

    The padding module is built once in the constructor (from image_size) and
    applied in forward — required for ONNX export. Same math as
    Conv2dDynamicSamePadding.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

        # Precompute the pad from the known input size.
        assert image_size is not None
        in_h, in_w = (image_size, image_size) if isinstance(image_size, int) else image_size
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h or extra_w:
            self.static_padding = nn.ZeroPad2d((extra_w // 2, extra_w - extra_w // 2,
                                                extra_h // 2, extra_h - extra_h // 2))
        else:
            self.static_padding = nn.Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.conv2d(padded, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def get_same_padding_maxPool2d(image_size=None):
    """Pick a 'SAME'-padding MaxPool2d variant based on whether the image size is known.

    Static padding (fixed at construction) is necessary for ONNX export.

    Args:
        image_size (int or tuple): Size of the image, or None if unknown.
    Returns:
        MaxPool2dDynamicSamePadding, or MaxPool2dStaticSamePadding bound to image_size.
    """
    if image_size is None:
        return MaxPool2dDynamicSamePadding
    return partial(MaxPool2dStaticSamePadding, image_size=image_size)
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
    """2D max-pooling with TensorFlow-style 'SAME' padding for dynamic image sizes.

    The pad amount is recomputed in every forward pass from the actual input size.
    """

    def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False):
        super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
        # nn.MaxPool2d keeps scalar arguments as ints; expand them to (H, W) pairs.
        if isinstance(self.stride, int):
            self.stride = [self.stride] * 2
        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size] * 2
        if isinstance(self.dilation, int):
            self.dilation = [self.dilation] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.kernel_size
        s_h, s_w = self.stride
        d_h, d_w = self.dilation
        # 'SAME' target output size, then the padding needed to reach it.
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * d_h + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * d_w + 1 - in_w, 0)
        if extra_h or extra_w:
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2])
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
    """2D max-pooling with TensorFlow-style 'SAME' padding for a fixed image size.

    The padding module is built once in the constructor (from image_size) and
    applied in forward — required for ONNX export.
    """

    def __init__(self, kernel_size, stride, image_size=None, **kwargs):
        super().__init__(kernel_size, stride, **kwargs)
        # nn.MaxPool2d keeps scalar arguments as ints; expand them to (H, W) pairs.
        if isinstance(self.stride, int):
            self.stride = [self.stride] * 2
        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size] * 2
        if isinstance(self.dilation, int):
            self.dilation = [self.dilation] * 2

        # Precompute the pad from the known input size.
        assert image_size is not None
        in_h, in_w = (image_size, image_size) if isinstance(image_size, int) else image_size
        k_h, k_w = self.kernel_size
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h or extra_w:
            self.static_padding = nn.ZeroPad2d((extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2))
        else:
            self.static_padding = nn.Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.max_pool2d(padded, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
################################################################################
# Helper functions for loading model params
################################################################################
# BlockDecoder: A Class for encoding and decoding BlockArgs
# efficientnet_params: A function to query compound coefficient
# get_model_params and efficientnet:
# Functions to get BlockArgs and GlobalParams for efficientnet
# url_map and url_map_advprop: Dicts of url_map for pretrained weights
# load_pretrained_weights: A function to load pretrained weights
class BlockDecoder(object):
    """Block Decoder for readability,
    straight from the official TensorFlow repository.

    Encodes/decodes BlockArgs namedtuples to/from compact strings such as
    'r1_k3_s11_e1_i32_o16_se0.25_noskip'.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """Get a block through a string notation of arguments.
        Args:
            block_string (str): A string notation of arguments.
                                Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.
        Returns:
            BlockArgs: The namedtuple defined at the top of this file.
        """
        assert isinstance(block_string, str)
        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split each token into its letter key and numeric value, e.g. 'k3' -> ('k', '3').
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
        # Check stride: a single digit, or two equal digits ('s22').
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))
        return BlockArgs(
            num_repeat=int(options['r']),
            kernel_size=int(options['k']),
            stride=[int(options['s'][0])],
            expand_ratio=int(options['e']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            se_ratio=float(options['se']) if 'se' in options else None,
            id_skip=('noskip' not in block_string))

    @staticmethod
    def _encode_block_string(block):
        """Encode a block to a string.
        Args:
            block (namedtuple): A BlockArgs type argument.
        Returns:
            block_string: A String form of BlockArgs.
        """
        # Bug fix: BlockArgs stores 'stride' (a 1-element list as produced by
        # _decode_block_string), not 'strides' — reading block.strides raised
        # AttributeError. Normalize to two values for the 'sHW' token.
        stride = block.stride
        if isinstance(stride, (list, tuple)):
            s_h = stride[0]
            s_w = stride[1] if len(stride) > 1 else stride[0]
        else:
            s_h = s_w = stride
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (s_h, s_w),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        # Bug fix: se_ratio may be None (see _decode_block_string); comparing
        # None with 0 raised TypeError.
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """Decode a list of string notations to specify blocks inside the network.
        Args:
            string_list (list[str]): A list of strings, each string is a notation of block.
        Returns:
            blocks_args: A list of BlockArgs namedtuples of block args.
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """Encode a list of BlockArgs to a list of strings.
        Args:
            blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args.
        Returns:
            block_strings: A list of strings, each string is a notation of block.
        """
        return [BlockDecoder._encode_block_string(block) for block in blocks_args]
def efficientnet_params(model_name):
    """Map an EfficientNet model name to its compound-scaling coefficients.

    Args:
        model_name (str): Model name to be queried.
    Returns:
        tuple: (width_coefficient, depth_coefficient, resolution, dropout_rate).
    Raises:
        KeyError: If model_name is not a known EfficientNet variant.
    """
    # Coefficients per variant: width, depth, input resolution, dropout.
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None,
                 dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000, include_top=True):
    """Create BlockArgs and GlobalParams for an efficientnet model.

    Args:
        width_coefficient (float): Channel-width multiplier.
        depth_coefficient (float): Layer-depth multiplier.
        image_size (int): Input resolution.
        dropout_rate (float): Dropout before the classifier head.
        drop_connect_rate (float): Stochastic-depth rate across blocks.
        num_classes (int): Number of output classes.
        include_top (bool): Whether the classifier head is kept.
    Returns:
        blocks_args, global_params.
    """
    # Baseline (efficientnet-b0) block layout; the EfficientNet constructor
    # rescales filters/repeats per model using the coefficients above.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25',
        'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25',
        'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25',
        'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    global_params = GlobalParams(
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        image_size=image_size,
        dropout_rate=dropout_rate,
        num_classes=num_classes,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        drop_connect_rate=drop_connect_rate,
        depth_divisor=8,
        min_depth=None,
        include_top=include_top,
    )
    return BlockDecoder.decode(block_strings), global_params
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name.

    Args:
        model_name (str): Model's name; must start with 'efficientnet'.
        override_params (dict): A dict to modify global_params.
    Returns:
        blocks_args, global_params
    Raises:
        NotImplementedError: For names outside the efficientnet family.
    """
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: {}'.format(model_name))
    w, d, s, p = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
    if override_params:
        # ValueError will be raised here if override_params has fields not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# Pretrained checkpoint URLs, keyed by model name.
# train with Standard methods
# check more details in paper(EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks)
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}
# train with Adversarial Examples(AdvProp)
# check more details in paper(Adversarial Examples Improve Image Recognition)
# NOTE: AdvProp weights expect different input preprocessing than the standard ones
# (see the advprop flag in load_pretrained_weights).
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
# TODO: add the petrained weights url map of 'efficientnet-l2'
def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False, verbose=True):
    """Loads pretrained weights from weights path or download using url.
    Args:
        model (Module): The whole model of efficientnet.
        model_name (str): Model name of efficientnet.
        weights_path (None or str):
            str: path to pretrained weights file on the local disk.
            None: use pretrained weights downloaded from the Internet.
        load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
        advprop (bool): Whether to load pretrained weights
                        trained with advprop (valid when weights_path is None).
        verbose (bool): Whether to print a confirmation message after loading.
    """
    if isinstance(weights_path, str):
        # map_location='cpu' lets GPU-saved checkpoints load on CPU-only hosts;
        # load_state_dict then copies tensors onto the model's own device.
        state_dict = torch.load(weights_path, map_location='cpu')
    else:
        # AutoAugment or Advprop (different preprocessing)
        url_map_ = url_map_advprop if advprop else url_map
        state_dict = model_zoo.load_url(url_map_[model_name])
    if load_fc:
        ret = model.load_state_dict(state_dict, strict=False)
        assert not ret.missing_keys, 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    else:
        # Drop the classifier head; only the backbone weights are loaded.
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        ret = model.load_state_dict(state_dict, strict=False)
        assert set(ret.missing_keys) == set(
            ['_fc.weight', '_fc.bias']), 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    # Bug fix: this assert's message previously said "Missing keys" even though
    # it checks unexpected_keys, making failures misleading.
    assert not ret.unexpected_keys, 'Unexpected keys when loading pretrained weights: {}'.format(ret.unexpected_keys)
    if verbose:
        print('Loaded pretrained weights for {}'.format(model_name))
LITE | LITE-main/src/set_encoder.py | import torch
import torch.nn as nn
"""
Classes and functions required for Set encoding in adaptation networks. Many of the ideas and classes here are
closely related to DeepSets (https://arxiv.org/abs/1703.06114).
"""
def mean_pooling(x):
    """Average a set of feature vectors over the set dimension (dim 0), keeping that dim."""
    return x.mean(dim=0, keepdim=True)
class SetEncoder(nn.Module):
    """
    Simple set encoder, implementing the DeepSets approach. Used for modeling permutation invariant
    representations on sets (mainly for extracting task-level representations from context sets).
    """
    def __init__(self):
        super(SetEncoder, self).__init__()
        self.pre_pooling_fn = SimplePrePoolNet()
        self.pooling_fn = mean_pooling

    def forward(self, x):
        """
        Forward pass through DeepSet SetEncoder: g(X) = rho ( mean ( phi(x) ) ),
        where X = (x0, ... xN) is a set of elements x in X (here, images from a context
        set) and the mean pools over elements of the set.
        :param x: (torch.tensor) Set of elements X (e.g., for images has shape batch x C x H x W ).
        :return: (torch.tensor) Representation of the set, single vector in Rk.
        """
        per_element = self.pre_pooling_fn(x)
        return self.pooling_fn(per_element)

    def pre_pool(self, x):
        """Return per-element embeddings without pooling over the set."""
        return self.pre_pooling_fn(x)
class SimplePrePoolNet(nn.Module):
    """
    Simple prepooling network for images: the phi mapping in DeepSets networks.
    A five-layer conv net (conv-BN-ReLU-maxpool blocks) similar to
    https://openreview.net/pdf?id=rJY0-Kcll, followed by global average pooling.
    """
    def __init__(self):
        super(SimplePrePoolNet, self).__init__()
        # Attribute names layer1..layer5 are kept for checkpoint compatibility.
        self.layer1 = self._make_conv2d_layer(3, 64)
        self.layer2 = self._make_conv2d_layer(64, 64)
        self.layer3 = self._make_conv2d_layer(64, 64)
        self.layer4 = self._make_conv2d_layer(64, 64)
        self.layer5 = self._make_conv2d_layer(64, 64)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    @staticmethod
    def _make_conv2d_layer(in_maps, out_maps):
        """One conv-BN-ReLU block followed by 2x2 max-pooling (halves H and W)."""
        return nn.Sequential(
            nn.Conv2d(in_maps, out_maps, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_maps),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=False)
        )

    def forward(self, x):
        for block in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            x = block(x)
        x = self.avgpool(x)
        return x.view(x.size(0), -1)

    @property
    def output_size(self):
        return 64
| 2,669 | 32.797468 | 115 | py |
LITE | LITE-main/src/features.py | import torch
import torch.nn as nn
from efficientnet import film_efficientnet, film_efficientnet_b0_84
def create_feature_extractor(args):
    """Build a (frozen) FiLM-EfficientNet feature extractor sized for args.image_size.

    :param args: Parsed command-line options; uses image_size and pretrained_model_path.
    :return: (nn.Module) Feature extractor with all parameters frozen.
    """
    if args.image_size == 84:
        extractor = film_efficientnet_b0_84(args.pretrained_model_path)
    else:
        extractor = film_efficientnet("efficientnet-b0")
    # The backbone stays frozen; only FiLM parameters adapt per task.
    for param in extractor.parameters():
        param.requires_grad = False
    return extractor
def create_film_adapter(feature_extractor, task_dim):
    """Build a FilmAdapter that generates FiLM params for the given extractor.

    :param feature_extractor: Backbone exposing get_adaptation_config().
    :param task_dim: (int) Dimensionality of the task-level representation fed to the generators.
    :return: (FilmAdapter) Adapter using FilmLayerGenerator layers.
    """
    config = feature_extractor.get_adaptation_config()
    return FilmAdapter(
        layer=FilmLayerGenerator,
        adaptation_config=config,
        task_dim=task_dim
    )
class BaseFilmLayer(nn.Module):
    """Common state and regularization for FiLM layers over one backbone layer.

    Subclasses populate gamma_regularizers / beta_regularizers (one entry per block).
    """
    def __init__(self, num_maps, num_blocks):
        super(BaseFilmLayer, self).__init__()
        self.num_maps = num_maps          # feature maps per block in this layer
        self.num_blocks = num_blocks      # number of blocks in this layer
        self.num_generated_params = 0

    def regularization_term(self):
        """
        Compute the regularization term for the parameters. Recall, FiLM applies gamma * x + beta.
        As such, params gamma and beta are regularized to unity, i.e. ||gamma - 1||_2 and ||beta||_2.
        :return: (torch.tensor) Scalar for l2 norm for all parameters according to regularization scheme.
        """
        total = 0
        for g_reg, b_reg in zip(self.gamma_regularizers, self.beta_regularizers):
            total = total + (g_reg ** 2).sum() + (b_reg ** 2).sum()
        return total
class FilmLayer(BaseFilmLayer):
    """FiLM layer with directly learned (task-independent) gamma/beta parameters."""
    def __init__(self, num_maps, num_blocks, task_dim=None):
        BaseFilmLayer.__init__(self, num_maps, num_blocks)
        self.gammas = nn.ParameterList()
        self.gamma_regularizers = nn.ParameterList()
        self.betas = nn.ParameterList()
        self.beta_regularizers = nn.ParameterList()
        for i in range(self.num_blocks):
            n = self.num_maps[i]
            # gammas start at identity scaling, betas at zero shift;
            # regularizers are small random scales keeping params near identity.
            self.gammas.append(nn.Parameter(torch.ones(n), requires_grad=True))
            self.gamma_regularizers.append(
                nn.Parameter(nn.init.normal_(torch.empty(n), 0, 0.001), requires_grad=True))
            self.betas.append(nn.Parameter(torch.zeros(n), requires_grad=True))
            self.beta_regularizers.append(
                nn.Parameter(nn.init.normal_(torch.empty(n), 0, 0.001), requires_grad=True))

    def forward(self, x):
        """Return per-block dicts of 'gamma'/'beta'; x is ignored (params are not task-conditioned)."""
        block_params = []
        for i in range(self.num_blocks):
            gamma = self.gammas[i] * self.gamma_regularizers[i] + torch.ones_like(self.gamma_regularizers[i])
            beta = self.betas[i] * self.beta_regularizers[i]
            block_params.append({'gamma': gamma, 'beta': beta})
        return block_params
class FilmAdapter(nn.Module):
    """Container of one FiLM layer per adapted backbone layer.

    adaptation_config supplies 'num_maps_per_layer' and 'num_blocks_per_layer'
    (parallel lists, one entry per backbone layer).
    """
    def __init__(self, layer, adaptation_config, task_dim=None):
        super().__init__()
        self.num_maps = adaptation_config['num_maps_per_layer']
        self.num_blocks = adaptation_config['num_blocks_per_layer']
        self.task_dim = task_dim
        self.num_target_layers = len(self.num_maps)
        self.layer = layer
        self.num_generated_params = 0
        self.layers = self.get_layers()

    def get_layers(self):
        """Instantiate one FiLM layer per backbone layer, tallying generated params."""
        layers = nn.ModuleList()
        for maps, blocks in zip(self.num_maps, self.num_blocks):
            film_layer = self.layer(num_maps=maps, num_blocks=blocks, task_dim=self.task_dim)
            self.num_generated_params += film_layer.num_generated_params
            layers.append(film_layer)
        return layers

    def forward(self, x):
        # One list of per-block param dicts per backbone layer.
        return [film_layer(x) for film_layer in self.layers]

    def regularization_term(self):
        return sum(film_layer.regularization_term() for film_layer in self.layers)
class DenseBlock(nn.Module):
    """Two-layer MLP: linear -> LayerNorm -> ReLU -> linear (in_size -> out_size)."""
    def __init__(self, in_size, out_size):
        super(DenseBlock, self).__init__()
        # Attribute names are kept for checkpoint compatibility.
        self.linear1 = nn.Linear(in_size, in_size)
        self.layernorm = nn.LayerNorm(in_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(in_size, out_size)

    def forward(self, x):
        hidden = self.relu(self.layernorm(self.linear1(x)))
        return self.linear2(hidden)
class FilmLayerGenerator(BaseFilmLayer):
    """FiLM layer whose gamma/beta are generated from a task representation.

    Each block owns two DenseBlock generators (gamma and beta) mapping the
    task_dim vector to per-channel FiLM parameters.
    """
    def __init__(self, num_maps, num_blocks, task_dim):
        BaseFilmLayer.__init__(self, num_maps, num_blocks)
        self.task_dim = task_dim
        self.gamma_generators = nn.ModuleList()
        self.gamma_regularizers = nn.ParameterList()
        self.beta_generators = nn.ModuleList()
        self.beta_regularizers = nn.ParameterList()
        for i in range(self.num_blocks):
            n = num_maps[i]
            self.num_generated_params += 2 * n  # one gamma and one beta per channel
            self.gamma_generators.append(self._make_layer(self.task_dim, n))
            self.gamma_regularizers.append(
                nn.Parameter(nn.init.normal_(torch.empty(n), 0, 0.001), requires_grad=True))
            self.beta_generators.append(self._make_layer(self.task_dim, n))
            self.beta_regularizers.append(
                nn.Parameter(nn.init.normal_(torch.empty(n), 0, 0.001), requires_grad=True))

    @staticmethod
    def _make_layer(in_size, out_size):
        return DenseBlock(in_size, out_size)

    def forward(self, x):
        """
        Forward pass through adaptation network.
        :param x: (torch.tensor) Input representation to network (task level representation z).
        :return: (list::dictionaries) Dictionary for every block in layer. Dictionary contains all
                 the parameters necessary to adapt a layer in the base network. The base network is
                 aware of the dict structure and can pull the params out during its forward pass.
        """
        block_params = []
        for i in range(self.num_blocks):
            # Regularizer-scaled offsets keep generated params near the identity transform.
            gamma = self.gamma_generators[i](x).squeeze() * self.gamma_regularizers[i] + \
                torch.ones_like(self.gamma_regularizers[i])
            beta = self.beta_generators[i](x).squeeze() * self.beta_regularizers[i]
            block_params.append({'gamma': gamma, 'beta': beta})
        return block_params
| 6,618 | 38.634731 | 129 | py |
LITE | LITE-main/src/efficientnet.py | """
The code in this file is substantially based on the code from "A PyTorch implementation of EfficientNet"
by lukemelas that can be found here: https://github.com/lukemelas/EfficientNet-PyTorch
"""
"""Model and module class for EfficientNet.
They are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import torch
from torch import nn
from torch.nn import functional as F
from efficientnet_utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
calculate_output_image_size
)
# Model names accepted by the EfficientNet constructors in this file.
VALID_MODELS = (
    'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
    'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
    'efficientnet-b8',
    # Support the construction of 'efficientnet-l2' without pretrained weights
    'efficientnet-l2'
)
def film(x, gamma, beta):
    """Apply a FiLM transform gamma * x + beta, broadcasting per-channel
    gamma/beta (shape [C]) over an NCHW tensor x."""
    return gamma[None, :, None, None] * x + beta[None, :, None, None]
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck Block.

    Pipeline: (optional) 1x1 expansion conv -> depthwise conv -> (optional)
    squeeze-and-excitation -> 1x1 projection conv, with an identity skip when
    stride == 1 and input/output filter counts match.

    Args:
        block_args (namedtuple): BlockArgs, defined in utils.py.
        global_params (namedtuple): GlobalParam, defined in utils.py.
        image_size (tuple or list): [image_height, image_width].
    References:
        [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
        [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
        [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
    """

    def __init__(self, block_args, global_params, image_size=None):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum  # pytorch's difference from tensorflow
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # whether to use skip connection and drop connect
        # Expansion phase (Inverted Bottleneck) — only present when expand_ratio != 1.
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size
        # Depthwise convolution phase (groups == channels makes it depthwise).
        k = self._block_args.kernel_size
        s = self._block_args.stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        image_size = calculate_output_image_size(image_size, s)
        # Squeeze and Excitation layer, if desired.
        # NOTE: the squeezed width is derived from the block's *input* filters
        # (not the expanded width), matching the official implementation.
        if self.has_se:
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Pointwise convolution phase (projection back down to output_filters).
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """MBConvBlock's forward function.

        Args:
            inputs (tensor): Input tensor.
            drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)
        x = self._depthwise_conv(x)
        x = self._bn1(x)
        x = self._swish(x)
        # Squeeze and Excitation: global pool -> reduce -> expand -> sigmoid gate.
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            x = torch.sigmoid(x_squeezed) * x
        # Pointwise Convolution (no activation after projection — linear bottleneck).
        x = self._project_conv(x)
        x = self._bn2(x)
        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            # The combination of skip connection and drop connect brings about stochastic depth.
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).

        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class FilmMBConvBlock(MBConvBlock):
    """MBConvBlock variant whose forward additionally applies a FiLM transform
    (gamma * x + beta) right after the depthwise-conv batch norm; otherwise the
    computation is identical to MBConvBlock.forward."""
    def __init__(self, block_args, global_params, image_size=None):
        MBConvBlock.__init__(self, block_args, global_params, image_size)

    def forward(self, inputs, drop_connect_rate=None, gamma=None, beta=None):
        """MBConvBlock's forward function, with FiLM conditioning.

        Args:
            inputs (tensor): Input tensor.
            drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
            gamma, beta: Per-channel FiLM parameters applied after the
                depthwise batch norm (see the module-level film() helper).
        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)
        x = self._depthwise_conv(x)
        x = self._bn1(x)
        # FiLM conditioning — the only difference from MBConvBlock.forward.
        x = film(x, gamma, beta)
        x = self._swish(x)
        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            x = torch.sigmoid(x_squeezed) * x
        # Pointwise Convolution
        x = self._project_conv(x)
        x = self._bn2(x)
        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            # The combination of skip connection and drop connect brings about stochastic depth.
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x
class EfficientNet(nn.Module):
    """EfficientNet model.
    Most easily loaded with the .from_name or .from_pretrained methods.
    Args:
        blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
        global_params (namedtuple): A set of GlobalParams shared between blocks.
    References:
        [1] https://arxiv.org/abs/1905.11946 (EfficientNet)
    Example:
        >>> import torch
        >>> from efficientnet.model import EfficientNet
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> model = EfficientNet.from_pretrained('efficientnet-b0')
        >>> model.eval()
        >>> outputs = model(inputs)
    """
    def __init__(self, blocks_args=None, global_params=None, block_fn=MBConvBlock):
        # block_fn allows subclasses (e.g. FilmEfficientNet) to substitute a
        # different MBConv block implementation.
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args
        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon
        # Get stem static or dynamic convolution depending on image size
        image_size = global_params.image_size
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        # Stem
        in_channels = 3 # rgb
        out_channels = round_filters(32, self._global_params) # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        image_size = calculate_output_image_size(image_size, 2)
        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )
            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(block_fn(block_args, self._global_params, image_size=image_size))
            image_size = calculate_output_image_size(image_size, block_args.stride)
            if block_args.num_repeat > 1: # modify block_args to keep same output size
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(block_fn(block_args, self._global_params, image_size=image_size))
                # image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1
        # Head
        in_channels = block_args.output_filters # output of final block
        out_channels = round_filters(1280, self._global_params)
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        # Final linear layer
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        # Classifier head (dropout + fc) is only built when include_top is set;
        # note that forward() below never applies it either way.
        if self._global_params.include_top:
            self._dropout = nn.Dropout(self._global_params.dropout_rate)
            self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        # set activation to memory efficient swish by default
        self._swish = MemoryEfficientSwish()
    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).
        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
        for block in self._blocks:
            block.set_swish(memory_efficient)
    def extract_endpoints(self, inputs):
        """Use convolution layer to extract features
        from reduction levels i in [1, 2, 3, 4, 5].
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Dictionary of last intermediate features
            with reduction levels i in [1, 2, 3, 4, 5].
        Example:
            >>> import torch
            >>> from efficientnet.model import EfficientNet
            >>> inputs = torch.rand(1, 3, 224, 224)
            >>> model = EfficientNet.from_pretrained('efficientnet-b0')
            >>> endpoints = model.extract_endpoints(inputs)
            >>> print(endpoints['reduction_1'].shape)  # torch.Size([1, 16, 112, 112])
            >>> print(endpoints['reduction_2'].shape)  # torch.Size([1, 24, 56, 56])
            >>> print(endpoints['reduction_3'].shape)  # torch.Size([1, 40, 28, 28])
            >>> print(endpoints['reduction_4'].shape)  # torch.Size([1, 112, 14, 14])
            >>> print(endpoints['reduction_5'].shape)  # torch.Size([1, 320, 7, 7])
            >>> print(endpoints['reduction_6'].shape)  # torch.Size([1, 1280, 7, 7])
        """
        endpoints = dict()
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        prev_x = x
        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)
            # A drop in spatial size marks the end of a reduction level.
            if prev_x.size(2) > x.size(2):
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
            elif idx == len(self._blocks) - 1:
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
            prev_x = x
        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
        return endpoints
    def extract_features(self, inputs):
        """use convolution layer to extract feature .
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Output of the final convolution
            layer in the efficientnet model.
        """
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)
        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        return x
    def forward(self, inputs, film_params=None):
        """EfficientNet's forward function.
        Extracts convolutional features and returns the globally average-pooled
        feature vector. The classification head (_dropout/_fc) is NOT applied
        here; `film_params` is accepted for interface compatibility with
        FilmEfficientNet but is ignored by this base class.
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Pooled features of shape (batch, feature_dim).
        """
        # Convolution layers
        x = self.extract_features(inputs)
        # Pooling
        x = self._avg_pooling(x)
        x = x.view(x.size(0), -1)
        return x
    @property
    def output_size(self):
        # NOTE(review): hard-coded head width. The head is built with
        # round_filters(1280, global_params), so 1280 is only correct when the
        # width coefficient leaves it unchanged (e.g. b0) — confirm for wider variants.
        return 1280
    @classmethod
    def from_name(cls, model_name, in_channels=3, **override_params):
        """Create an efficientnet model according to name.
        Args:
            model_name (str): Name for efficientnet.
            in_channels (int): Input data's channel number.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'num_classes', 'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'
        Returns:
            An efficientnet model.
        """
        cls._check_model_name_is_valid(model_name)
        blocks_args, global_params = get_model_params(model_name, override_params)
        model = cls(blocks_args, global_params)
        model._change_in_channels(in_channels)
        return model
    @classmethod
    def from_pretrained(cls, model_name, weights_path=None, advprop=False,
                        in_channels=3, num_classes=1000, **override_params):
        """Create an efficientnet model according to name.
        Args:
            model_name (str): Name for efficientnet.
            weights_path (None or str):
                str: path to pretrained weights file on the local disk.
                None: use pretrained weights downloaded from the Internet.
            advprop (bool):
                Whether to load pretrained weights
                trained with advprop (valid when weights_path is None).
            in_channels (int): Input data's channel number.
            num_classes (int):
                Number of categories for classification.
                It controls the output size for final linear layer.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'
        Returns:
            A pretrained efficientnet model.
        """
        model = cls.from_name(model_name, num_classes=num_classes, **override_params)
        # The fc layer is only restored when the class count matches ImageNet's.
        load_pretrained_weights(model, model_name, weights_path=weights_path,
                                load_fc=(num_classes == 1000), advprop=advprop)
        model._change_in_channels(in_channels)
        return model
    @classmethod
    def get_image_size(cls, model_name):
        """Get the input image size for a given efficientnet model.
        Args:
            model_name (str): Name for efficientnet.
        Returns:
            Input image size (resolution).
        """
        cls._check_model_name_is_valid(model_name)
        _, _, res, _ = efficientnet_params(model_name)
        return res
    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """Validates model name.
        Args:
            model_name (str): Name for efficientnet.
        Returns:
            bool: Is a valid name or not.
        """
        if model_name not in VALID_MODELS:
            raise ValueError('model_name should be one of: ' + ', '.join(VALID_MODELS))
    def _change_in_channels(self, in_channels):
        """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
        Args:
            in_channels (int): Input data's channel number.
        """
        if in_channels != 3:
            Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
            out_channels = round_filters(32, self._global_params)
            self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
class FilmEfficientNet(EfficientNet):
    """EfficientNet built from FilmMBConvBlock blocks: FiLM (gamma/beta)
    modulation is applied after the stem BN, each block's depthwise BN,
    and the head BN, using externally supplied per-layer parameters."""
    def __init__(self, blocks_args=None, global_params=None, block_fn=FilmMBConvBlock):
        EfficientNet.__init__(self, blocks_args, global_params, block_fn=block_fn)
    def get_adaptation_config(self):
        """Return the per-layer channel counts a FiLM generator must produce:
        one entry for the stem, one per block (depthwise channels), one for the head."""
        num_maps_per_layer, num_blocks_per_layer = [], []
        num_maps_per_layer.append([self._conv_stem.out_channels])
        num_blocks_per_layer.append(1)
        for block in self._blocks:
            num_maps_per_layer.append([block._depthwise_conv.out_channels])
            num_blocks_per_layer.append(1)
        num_blocks_per_layer.append(1)
        num_maps_per_layer.append([self._conv_head.out_channels])
        param_dict = {
            'num_maps_per_layer' : num_maps_per_layer,
            'num_blocks_per_layer' : num_blocks_per_layer
        }
        return param_dict
    def extract_features(self, inputs, param_dict):
        """use convolution layer to extract feature .
        Args:
            inputs (tensor): Input tensor.
            param_dict: per-layer FiLM parameters, indexed as
                param_dict[layer_idx][0]['gamma'/'beta'] (layer 0 = stem,
                then one per block, last = head).
        Returns:
            Output of the final convolution
            layer in the efficientnet model.
        """
        # Stem
        layer_idx = 0
        x = self._bn0(self._conv_stem(inputs))
        # FiLM inserted between BN and activation, mirroring FilmMBConvBlock.
        x = film(x, param_dict[layer_idx][0]['gamma'], param_dict[layer_idx][0]['beta'])
        layer_idx += 1
        x = self._swish(x)
        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate, gamma=param_dict[layer_idx][0]['gamma'],
                      beta=param_dict[layer_idx][0]['beta'])
            layer_idx += 1
        # Head
        x = self._bn1(self._conv_head(x))
        x = film(x, param_dict[layer_idx][0]['gamma'], param_dict[layer_idx][0]['beta'])
        x = self._swish(x)
        return x
    def forward(self, inputs, film_params=None):
        """EfficientNet's forward function with FiLM modulation.
        Returns the globally average-pooled feature vector (no classifier head).
        Args:
            inputs (tensor): Input tensor.
            film_params: per-layer FiLM parameters (see extract_features).
                NOTE(review): the default None would fail inside
                extract_features (param_dict is indexed) — callers appear to
                always pass real parameters; confirm.
        Returns:
            Pooled features of shape (batch, feature_dim).
        """
        # Convolution layers
        x = self.extract_features(inputs, film_params)
        # Pooling
        x = self._avg_pooling(x)
        x = x.view(x.size(0), -1)
        return x
def film_efficientnet(type='efficientnet-b0'):
    """Return a pretrained FilmEfficientNet feature extractor (no classifier head)."""
    model_name = type  # NOTE: parameter shadows the builtin `type`; kept for caller compatibility
    return FilmEfficientNet.from_pretrained(model_name, num_classes=0)
def film_efficientnet_b0_84(pretrained_model_path):
    """Load a FilmEfficientNet-b0 feature extractor from a training checkpoint.

    Args:
        pretrained_model_path (str): Path to a checkpoint file whose
            'state_dict' entry was saved from a classifier-headed model.
    Returns:
        FilmEfficientNet with the checkpoint weights loaded and the final
        fully-connected classifier weights discarded.
    """
    # map_location='cpu' lets GPU-trained checkpoints load on CPU-only hosts;
    # the caller is responsible for moving the model to its target device.
    checkpoint = torch.load(pretrained_model_path, map_location='cpu')
    state_dict = checkpoint['state_dict']
    # Drop the classification head: the extractor is built with num_classes=0.
    state_dict.pop('_fc.weight')
    state_dict.pop('_fc.bias')
    feature_extractor = FilmEfficientNet.from_name('efficientnet-b0', num_classes=0)
    ret = feature_extractor.load_state_dict(state_dict, strict=False)
    assert set(ret.missing_keys) == set(['_fc.weight', '_fc.bias']), 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    # Fixed message: this assert guards against *unexpected* (extra) keys, not missing ones.
    assert not ret.unexpected_keys, 'Unexpected keys when loading pretrained weights: {}'.format(ret.unexpected_keys)
    return feature_extractor
| 22,768 | 38.736475 | 144 | py |
LITE | LITE-main/src/utils.py | import os
import torch
import torch.nn.functional as F
import numpy as np
from enum import Enum
import sys
import math
class MetaLearningState(Enum):
    # Distinguishes the meta-training phase from the meta-testing phase;
    # passed through model calls (e.g. to select batch-norm behavior).
    META_TRAIN = 0
    META_TEST = 1
class ValidationAccuracies:
    """
    Determines if an evaluation on the validation set is better than the best so far.
    In particular, this handles the case for meta-dataset where we validate on multiple datasets and we deem
    the evaluation to be better if more than half of the validation accuracies on the individual validation datasets
    are better than the previous best.
    """
    def __init__(self, validation_datasets):
        """Track the best-so-far accuracy per validation dataset, all starting at zero."""
        self.datasets = validation_datasets
        self.dataset_count = len(self.datasets)
        self.current_best_accuracy_dict = {
            dataset: {"accuracy": 0.0, "confidence": 0.0} for dataset in self.datasets
        }

    def is_better(self, accuracies_dict):
        """Return True when at least half the datasets improved over their best accuracy."""
        improved_count = sum(
            1 for dataset in self.datasets
            if accuracies_dict[dataset]["accuracy"] > self.current_best_accuracy_dict[dataset]["accuracy"]
        )
        return improved_count >= int(math.ceil(self.dataset_count / 2.0))

    def replace(self, accuracies_dict):
        """Adopt `accuracies_dict` as the new best-so-far record."""
        self.current_best_accuracy_dict = accuracies_dict

    def print(self, logger, accuracy_dict):
        """Log per-dataset validation accuracy with its confidence interval."""
        logger.print_and_log("")  # add a blank line
        logger.print_and_log("Validation Accuracies:")
        for dataset in self.datasets:
            logger.print_and_log("{0:}: {1:.1f}+/-{2:.1f}".format(dataset, accuracy_dict[dataset]["accuracy"],
                                                                  accuracy_dict[dataset]["confidence"]))
        logger.print_and_log("")  # add a blank line

    def get_current_best_accuracy_dict(self):
        """Return the current best-so-far accuracy record."""
        return self.current_best_accuracy_dict
class LogFiles:
    """Owns the checkpoint directory and the paths of the saved model files.

    Validates the directory against the run mode (new run / resume / test) and
    exits the process when the preconditions are not met.
    """

    def __init__(self, checkpoint_dir, resume, test_mode):
        self._checkpoint_dir = checkpoint_dir
        if not self._verify_checkpoint_dir(resume, test_mode):
            sys.exit()
        # A fresh training run gets a newly created directory.
        if not test_mode and not resume:
            os.makedirs(self.checkpoint_dir)
        self._best_validation_model_path = os.path.join(checkpoint_dir, 'best_validation.pt')
        self._fully_trained_model_path = os.path.join(checkpoint_dir, 'fully_trained.pt')

    @property
    def checkpoint_dir(self):
        """Directory where checkpoints and models are written."""
        return self._checkpoint_dir

    @property
    def best_validation_model_path(self):
        """Path of the model snapshot with the best validation score."""
        return self._best_validation_model_path

    @property
    def fully_trained_model_path(self):
        """Path of the model saved at the end of training."""
        return self._fully_trained_model_path

    def _verify_checkpoint_dir(self, resume, test_mode):
        """Check directory preconditions for the requested mode; report problems on stdout."""
        ok = True
        if resume:  # resuming needs both the directory and a checkpoint file
            if not os.path.exists(self.checkpoint_dir):
                print("Can't resume from checkpoint. Checkpoint directory ({}) does not exist.".format(self.checkpoint_dir), flush=True)
                ok = False
            checkpoint_file = os.path.join(self.checkpoint_dir, 'checkpoint.pt')
            if not os.path.isfile(checkpoint_file):
                print("Can't resume for checkpoint. Checkpoint file ({}) does not exist.".format(checkpoint_file), flush=True)
                ok = False
        elif test_mode:  # testing needs an existing directory
            if not os.path.exists(self.checkpoint_dir):
                print("Can't test. Checkpoint directory ({}) does not exist.".format(self.checkpoint_dir), flush=True)
                ok = False
        else:  # a fresh training run must not clobber an existing directory
            if os.path.exists(self.checkpoint_dir):
                print("Checkpoint directory ({}) already exits.".format(self.checkpoint_dir), flush=True)
                print("If starting a new training run, specify a directory that does not already exist.", flush=True)
                print("If you want to resume a training run, specify the -r option on the command line.", flush=True)
                ok = False
        return ok
class Logger:
    """Appends log messages to a file and optionally echoes them to stdout."""

    def __init__(self, checkpoint_dir, log_file_name):
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        log_file_path = os.path.join(checkpoint_dir, log_file_name)
        # Initialize to None first so __del__ is safe even if open() raises.
        self.file = None
        # Mode "a" creates the file when missing, so it covers both branches of
        # the original exists/append check; buffering=1 flushes on every line.
        self.file = open(log_file_path, "a", buffering=1)

    def __del__(self):
        # Guard: open() may have failed, leaving self.file as None.
        if self.file is not None:
            self.file.close()

    def log(self, message):
        """Write `message` (plus a newline) to the log file."""
        self.file.write(message + '\n')

    def print_and_log(self, message):
        """Print `message` to stdout and also write it to the log file."""
        print(message, flush=True)
        self.log(message)
def compute_accuracy(logits, labels):
    """
    Compute classification accuracy.
    """
    predictions = torch.argmax(logits, dim=-1)
    return (predictions == labels).float().mean()
def cross_entropy_loss(logits, labels):
    """Cross-entropy between `logits` and integer class `labels` (mean over the batch, F.cross_entropy's default reduction)."""
    return F.cross_entropy(logits, labels)
def shuffle(images, labels):
    """
    Return shuffled data.
    """
    order = np.random.permutation(images.shape[0])
    return images[order], labels[order]
| 5,276 | 35.393103 | 136 | py |
LITE | LITE-main/src/model.py | import torch
import numpy as np
import torch.nn as nn
from config_networks import ConfigureNetworks
from mahalanonbis import MahalanobisPredictor
from set_encoder import mean_pooling
class FewShotClassifier(nn.Module):
    """Few-shot classifier: a set encoder builds a task representation from the
    context set, a feature adaptation network produces FiLM parameters for the
    feature extractor, and a Mahalanobis-distance classifier predicts labels."""
    def __init__(self, args, logger, device):
        super(FewShotClassifier, self).__init__()
        self.args = args
        self.logger = logger
        self.device = device
        networks = ConfigureNetworks(args=self.args)
        self.set_encoder = networks.get_encoder()
        self.feature_extractor = networks.get_feature_extractor()
        self.feature_adaptation_network = networks.get_feature_adaptation()
        # Per-task state, populated during forward / configure_classifier.
        self.task_representation = None
        self.classifier = MahalanobisPredictor()
        self.means = None
        self.precisions = None
        # Cache of no-gradient set-encoder embeddings, reused across LITE batches.
        self.reps_cache = None
    def forward(self, context_images, context_labels, target_images, meta_learning_state):
        """Full pipeline for one task: encode the context set, extract adapted
        features, fit the Mahalanobis classifier, and return target logits."""
        self.build_task_representation(context_images)
        context_features, target_features = self.get_features(context_images, target_images, meta_learning_state)
        self.means, self.precisions = self.classifier.compute_class_means_and_precisions(context_features, context_labels)
        return self.classifier.predict(target_features, self.means, self.precisions)
    def configure_classifier(self, context_features, context_labels):
        """Fit the classifier's class means and precisions from context features."""
        self.means, self.precisions = self.classifier.compute_class_means_and_precisions(context_features, context_labels)
    def predict(self, target_images, meta_learning_state):
        """Classify target images using the previously configured classifier state."""
        target_features = self.get_target_features(target_images, meta_learning_state)
        return self.classifier.predict(target_features, self.means, self.precisions)
    def build_task_representation(self, context_images):
        """Encode the whole context set in one pass."""
        self.task_representation = self.set_encoder(context_images)
    def build_task_representation_by_batch(self, context_images):
        """Encode the context set in batches, then mean-pool the embeddings."""
        reps = []
        num_images = context_images.size(0)
        num_batches = int(np.ceil(float(num_images) / float(self.args.batch_size)))
        for batch in range(num_batches):
            batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
            reps.append(self.set_encoder.pre_pool(context_images[batch_start_index: batch_end_index]))
        self.task_representation = mean_pooling(torch.vstack(reps))
    def build_task_representation_with_split_batch(self, context_images, grad_indices, no_grad_indices):
        """LITE-style encoding: back-propagate through only the `grad_indices`
        subset; the rest is computed once without gradients and cached."""
        num_images = context_images.size(0)
        if self.reps_cache is None: # cache the part with no gradients
            reps = []
            num_batches = int(np.ceil(float(num_images) / float(self.args.batch_size)))
            for batch in range(num_batches):
                batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
                torch.set_grad_enabled(False)
                reps.append(self.set_encoder.pre_pool(context_images[batch_start_index: batch_end_index]))
            self.reps_cache = torch.vstack(reps).to(self.device)
        # now select some random images for that will have gradients and process those
        embeddings = []
        if len(grad_indices) > 0:
            torch.set_grad_enabled(True)
            embeddings.append(self.set_encoder.pre_pool(context_images[grad_indices]))
        # now add in the no_grad images
        # NOTE(review): if grad_indices is empty, grad mode is left disabled here
        # and must be re-enabled by the caller — confirm callers handle this.
        embeddings.extend(self.reps_cache[no_grad_indices])
        # pool
        self.task_representation = mean_pooling(torch.vstack(embeddings))
    def get_context_features(self, context_images, meta_learning_state):
        """Extract features for context images using FiLM parameters generated
        from the current task representation."""
        feature_extractor_params = self.feature_adaptation_network(self.task_representation)
        self._set_batch_norm_mode(meta_learning_state)
        return self.feature_extractor(context_images, feature_extractor_params)
    def get_target_features(self, target_images, meta_learning_state):
        """Extract features for target images using FiLM parameters generated
        from the current task representation."""
        feature_extractor_params = self.feature_adaptation_network(self.task_representation)
        self._set_batch_norm_mode(meta_learning_state)
        return self.feature_extractor(target_images, feature_extractor_params)
    def get_features(self, context_images, target_images, meta_learning_state):
        """Extract adapted features for both context and target images."""
        feature_extractor_params = self.feature_adaptation_network(self.task_representation)
        self._set_batch_norm_mode(meta_learning_state)
        context_features = self.feature_extractor(context_images, feature_extractor_params)
        self._set_batch_norm_mode(meta_learning_state)
        target_features = self.feature_extractor(target_images, feature_extractor_params)
        return context_features, target_features
    def _set_batch_norm_mode(self, meta_learning_state):
        # Deliberately ignores meta_learning_state: the feature extractor is
        # always kept in eval mode (frozen batch-norm statistics).
        self.feature_extractor.eval() # ignore context and state flag
    def _get_batch_indices(self, index, last_element):
        """Return [start, end) indices for batch `index`, folding a would-be
        final batch of a single element into the previous batch."""
        batch_start_index = index * self.args.batch_size
        batch_end_index = batch_start_index + self.args.batch_size
        if batch_end_index == (last_element - 1): # avoid batch size of 1
            batch_end_index = last_element
        if batch_end_index > last_element:
            batch_end_index = last_element
        return batch_start_index, batch_end_index
    def count_parameters(self, model):
        """Log total/trainable parameter counts for `model` and its sub-networks.
        (Takes `model` explicitly; callers currently pass the instance itself.)"""
        model_param_count = sum(p.numel() for p in model.parameters())
        model_trainable_param_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
        feature_extractor_param_count = sum(p.numel() for p in model.feature_extractor.parameters())
        set_encoder_param_count = sum(p.numel() for p in model.set_encoder.parameters())
        feature_adaptation_param_count = sum(p.numel() for p in model.feature_adaptation_network.parameters())
        self.logger.print_and_log('Parameter Counts:')
        self.logger.print_and_log('Model: {}'.format(model_param_count))
        self.logger.print_and_log('Trainable: {}'.format(model_trainable_param_count))
        self.logger.print_and_log('Feature Extractor: {}'.format(feature_extractor_param_count))
        self.logger.print_and_log('Set Encoder: {}'.format(set_encoder_param_count))
        self.logger.print_and_log('Feature Extractor Adaptation Network: {}'.format(feature_adaptation_param_count))
    def clear_caches(self):
        """Invalidate the cached no-gradient set-encoder embeddings (call per task)."""
        self.reps_cache = None
| 6,282 | 51.798319 | 122 | py |
LITE | LITE-main/src/run.py | import torch
import numpy as np
import argparse
import os
from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState,\
shuffle
from model import FewShotClassifier
from dataset import get_dataset_reader
from tf_dataset_reader import TfDatasetReader
from image_folder_reader import ImageFolderReader
NUM_VALIDATION_TASKS = 200  # tasks sampled per dataset during validation
NUM_TEST_TASKS = 600  # tasks sampled per dataset during testing
PRINT_FREQUENCY = 1000  # iterations between checkpoint saves / training-stat prints
def main():
    """Entry point: build a Learner and execute the configured mode."""
    Learner().run()
class Learner:
    def __init__(self):
        """Parse arguments, set up logging, build the model, optimizer, and
        (when training/testing on meta-dataset) the episodic dataset reader."""
        self.args = self.parse_command_line()
        # LogFiles validates the checkpoint directory against the run mode.
        self.log_files = LogFiles(self.args.checkpoint_dir, self.args.resume_from_checkpoint,
                                  (self.args.mode == 'test') or (self.args.mode == 'test_vtab'))
        self.logger = Logger(self.args.checkpoint_dir, "log.txt")
        self.logger.print_and_log("Options: %s\n" % self.args)
        self.logger.print_and_log("Checkpoint Directory: %s\n" % self.log_files.checkpoint_dir)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model = self.init_model()
        self.train_set, self.validation_set, self.test_set = self.init_data()
        # The episodic reader is only needed for meta-dataset modes (not test_vtab).
        if self.args.mode == "train" or self.args.mode == "test" or self.args.mode == 'train_test':
            self.dataset = get_dataset_reader(
                args=self.args,
                train_set=self.train_set,
                validation_set=self.validation_set,
                test_set=self.test_set)
        # Select the per-task training function; LITE batches with split gradients.
        if self.args.train_method == 'lite':
            self.train_fn = self.train_lite
        else:
            self.train_fn = self.train_task
        # Both 'lite' and 'small_task' process the target set in batches.
        self.use_batches = False if self.args.train_method == 'no_lite' else True
        self.loss = cross_entropy_loss
        self.accuracy_fn = compute_accuracy
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        self.validation_accuracies = ValidationAccuracies(self.validation_set)
        self.start_iteration = 0
        if self.args.resume_from_checkpoint:
            self.load_checkpoint()
        self.optimizer.zero_grad()
        self.feature_cache = None
def init_model(self):
model = FewShotClassifier(args=self.args, logger=self.logger, device=self.device).to(self.device)
model.count_parameters(model)
# set encoder is always in train mode (it only sees context data).
# Feature extractor gets switched in model.
model.train()
return model
def init_data(self):
train_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mnist']
validation_set = ['omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mscoco']
test_set = self.args.test_datasets
return train_set, validation_set, test_set
"""
Command line parser
"""
    def parse_command_line(self):
        """Parse and return the command-line arguments controlling mode, data
        locations, training hyper-parameters, and testing options."""
        parser = argparse.ArgumentParser()
        # operational parameters
        parser.add_argument("--mode", choices=["train", "test", "train_test", "test_vtab"], default="train_test",
                            help="Whether to run meta-training only, meta-testing only,"
                                 "both meta-training and meta-testing, or testing on vtab.")
        parser.add_argument("--checkpoint_dir", "-c", default='../checkpoints', help="Directory to save checkpoint to.")
        parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False,
                            action="store_true", help="Restart from latest checkpoint.")
        # data parameters
        parser.add_argument('--test_datasets', nargs='+', help='Datasets to use for testing',
                            default=["omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi", "traffic_sign",
                                     "mscoco"])
        parser.add_argument("--data_path", default="../datasets", help="Path to Meta-Dataset records.")
        parser.add_argument("--download_path_for_tensorflow_datasets", default=None,
                            help="Path to download the tensorflow datasets.")
        parser.add_argument("--download_path_for_sun397_dataset", default=None,
                            help="Path to download the sun397 dataset.")
        # training parameters
        parser.add_argument("--train_method", choices=["lite", "small_task", "no_lite"], default="lite",
                            help="Whether to use lite, small tasks, or not lite.")
        parser.add_argument("--pretrained_model_path", default="../models/efficientnet-b0_84.pt",
                            help="Path to dataset records.")
        parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.")
        parser.add_argument("--tasks_per_step", type=int, default=16,
                            help="Number of tasks between parameter optimizations.")
        parser.add_argument("--training_iterations", "-i", type=int, default=10000,
                            help="Number of meta-training iterations.")
        parser.add_argument("--max_way_train", type=int, default=50, help="Maximum way of meta-train task.")
        parser.add_argument("--max_support_train", type=int, default=500,
                            help="Maximum support set size of meta-train task.")
        parser.add_argument("--image_size", type=int, default=224, help="Image height and width.")
        parser.add_argument("--batch_size", type=int, default=40, help="Size of batch.")
        parser.add_argument("--h", type=int, default=40,
                            help="Number of support set samples to back-propagate when training with LITE.")
        # testing parameters
        parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
        parser.add_argument("--val_freq", type=int, default=5000, help="Number of iterations between validations.")
        args = parser.parse_args()
        return args
    def run(self):
        """Execute the configured mode: the meta-training loop (with periodic
        checkpointing and validation) and/or meta-testing."""
        if self.args.mode == 'train' or self.args.mode == 'train_test':
            train_accuracies = []
            losses = []
            total_iterations = self.args.training_iterations
            for iteration in range(self.start_iteration, total_iterations):
                task_dict = self.dataset.get_train_task()
                context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                if self.use_batches:
                    # Batched path: per-task caches must be reset, then the
                    # target set is processed in fixed-size batches.
                    self.model.clear_caches()
                    self.feature_cache = None
                    target_set_size = len(target_labels)
                    num_batches = self._get_number_of_batches(target_set_size)
                    for batch in range(num_batches):
                        batch_start_index, batch_end_index = self._get_batch_indices(batch, target_set_size)
                        batch_loss, batch_accuracy = self.train_fn(
                            context_images,
                            target_images[batch_start_index : batch_end_index],
                            context_labels,
                            target_labels[batch_start_index : batch_end_index]
                        )
                        train_accuracies.append(batch_accuracy)
                        losses.append(batch_loss)
                else:
                    task_loss, task_accuracy = self.train_fn(context_images, target_images, context_labels,
                                                             target_labels)
                    train_accuracies.append(task_accuracy)
                    losses.append(task_loss)
                # optimize: gradients are accumulated across tasks_per_step tasks
                # (train_fn calls backward()), then applied here in one step.
                if ((iteration + 1) % self.args.tasks_per_step == 0) or (iteration == (total_iterations - 1)):
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                if (iteration + 1) % PRINT_FREQUENCY == 0:
                    # print training stats
                    self.save_checkpoint(iteration + 1)
                    torch.save(self.model.state_dict(), os.path.join(self.log_files.checkpoint_dir,
                                                                     "model_{}.pt".format(iteration + 1)))
                    self.logger.print_and_log('Task [{}/{}], Train Loss: {:.7f},'
                                              'Train Accuracy: {:.7f}, Learning Rate: {:.7f}'
                                              .format(iteration + 1, total_iterations,
                                                      torch.Tensor(losses).mean().item(),
                                                      torch.Tensor(train_accuracies).mean().item(),
                                                      self.optimizer.param_groups[0]['lr']))
                    train_accuracies = []
                    losses = []
                if ((iteration + 1) % self.args.val_freq == 0) and (iteration + 1) != total_iterations:
                    # validate
                    accuracy_dict = self.validate()
                    self.validation_accuracies.print(self.logger, accuracy_dict)
                    # save the model if validation is the best so far
                    if self.validation_accuracies.is_better(accuracy_dict):
                        self.validation_accuracies.replace(accuracy_dict)
                        torch.save(self.model.state_dict(), self.log_files.best_validation_model_path)
                        self.logger.print_and_log('Best validation model was updated.')
                        self.logger.print_and_log('')
            # save the final model
            torch.save(self.model.state_dict(), self.log_files.fully_trained_model_path)
        if self.args.mode == 'train_test':
            self.test(self.log_files.fully_trained_model_path)
            self.test(self.log_files.best_validation_model_path)
        if self.args.mode == 'test':
            self.test(self.args.test_model_path)
        if self.args.mode == 'test_vtab':
            self._test_transfer_learning(self.args.test_model_path)
def train_task(self, context_images, target_images, context_labels, target_labels):
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TRAIN)
task_loss = self.loss(target_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
task_loss += regularizer_scaling * regularization_term
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
    def train_lite(self, context_images, target_images, context_labels, target_labels):
        """LITE training step: back-propagate through only h random context
        examples; the remaining context is processed without gradients (and
        cached inside the model). Returns (loss, accuracy) for the target batch."""
        # We'll split the context set into two: the first part will be of size batch_size and we'll use gradients
        # for that. The second part will be everything else and we'll use no gradients for that, so we only need to
        # compute that once per task.
        context_size = context_images.size(0)
        indices = np.random.permutation(context_size)
        h = min(self.args.h, context_size)   # number of example to back propagate
        grad_indices = indices[0: h]
        no_grad_indices = indices[h:]
        self.model.build_task_representation_with_split_batch(context_images, grad_indices, no_grad_indices)
        context_features = self._compute_features_with_split_batch(context_images, grad_indices, no_grad_indices,
                                                                   MetaLearningState.META_TRAIN)
        # Labels are permuted with the same indices so they stay aligned with
        # the reordered (grad-first, then no-grad) context features.
        self.model.configure_classifier(context_features, context_labels[indices])
        # now the target set
        # Re-enable gradients explicitly (the split-batch helpers may have left them disabled).
        torch.set_grad_enabled(True)
        batch_logits = self.model.predict(target_images, MetaLearningState.META_TRAIN)
        # compute the loss
        batch_loss = self.loss(batch_logits, target_labels) / self.args.tasks_per_step
        regularization_term = (self.model.feature_adaptation_network.regularization_term())
        regularizer_scaling = 0.001
        batch_loss += regularizer_scaling * regularization_term
        # compute accuracy
        batch_accuracy = self.accuracy_fn(batch_logits, target_labels)
        batch_loss.backward(retain_graph=False)
        return batch_loss, batch_accuracy
def _get_number_of_batches(self, task_size):
num_batches = int(np.ceil(float(task_size) / float(self.args.batch_size)))
if num_batches > 1 and (task_size % self.args.batch_size == 1):
num_batches -= 1
return num_batches
def _get_batch_indices(self, index, last_element):
batch_start_index = index * self.args.batch_size
batch_end_index = batch_start_index + self.args.batch_size
if batch_end_index == (last_element - 1): # avoid batch size of 1
batch_end_index = last_element
if batch_end_index > last_element:
batch_end_index = last_element
return batch_start_index, batch_end_index
    def validate(self):
        """Evaluate the current model on every validation dataset.

        Runs NUM_VALIDATION_TASKS episodes per dataset (no gradients) and
        returns ``{item: {"accuracy": mean_pct, "confidence": 95%_halfwidth}}``.
        """
        with torch.no_grad():
            accuracy_dict ={}
            for item in self.validation_set:
                accuracies = []
                for _ in range(NUM_VALIDATION_TASKS):
                    task_dict = self.dataset.get_validation_task(item)
                    context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                    if self.use_batches:
                        # Batched path: embed context, configure the classifier,
                        # then predict the target set batch-by-batch.
                        self.model.build_task_representation_by_batch(context_images)
                        context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                        self.model.configure_classifier(context_features, context_labels)
                        test_set_size = len(target_labels)
                        num_batches = self._get_number_of_batches(test_set_size)
                        target_logits = []
                        for batch in range(num_batches):
                            batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
                            batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
                                                              MetaLearningState.META_TEST)
                            target_logits.append(batch_logits)
                        target_logits = torch.vstack(target_logits)
                        target_accuracy = self.accuracy_fn(target_logits, target_labels)
                        del target_logits
                        accuracies.append(target_accuracy.item())
                    else:
                        # Single-shot path: the model handles the whole episode.
                        target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TEST)
                        accuracy = self.accuracy_fn(target_logits, target_labels)
                        accuracies.append(accuracy.item())
                        del target_logits
                # Accuracies are fractions; *100 converts to percent.
                accuracy = np.array(accuracies).mean() * 100.0
                # 196.0 = 1.96 * 100: 95% CI half-width in percentage points.
                confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
                accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
            return accuracy_dict
    def test(self, path):
        """Evaluate the checkpoint at ``path`` on every test dataset.

        Reinitialises the model, loads weights (unless ``path == 'None'``),
        runs NUM_TEST_TASKS episodes per dataset, and logs the mean accuracy
        with its 95% confidence half-width. Returns nothing; output goes to
        ``self.logger``.
        """
        self.logger.print_and_log("") # add a blank line
        self.logger.print_and_log('Testing model {0:}: '.format(path))
        self.model = self.init_model()
        if path != 'None':
            self.model.load_state_dict(torch.load(path))
        with torch.no_grad():
            for item in self.test_set:
                accuracies = []
                for _ in range(NUM_TEST_TASKS):
                    task_dict = self.dataset.get_test_task(item)
                    context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                    if self.use_batches:
                        # Batched path: mirrors validate() — embed context,
                        # configure classifier, predict targets per batch.
                        self.model.build_task_representation_by_batch(context_images)
                        context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                        self.model.configure_classifier(context_features, context_labels)
                        test_set_size = len(target_labels)
                        num_batches = self._get_number_of_batches(test_set_size)
                        target_logits = []
                        for batch in range(num_batches):
                            batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
                            batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
                                                              MetaLearningState.META_TEST)
                            target_logits.append(batch_logits)
                        target_logits = torch.vstack(target_logits)
                        target_accuracy = self.accuracy_fn(target_logits, target_labels)
                        del target_logits
                        accuracies.append(target_accuracy.item())
                    else:
                        target_logits = self.model(context_images, context_labels, target_images,
                                                   MetaLearningState.META_TEST)
                        accuracy = self.accuracy_fn(target_logits, target_labels)
                        accuracies.append(accuracy.item())
                        del target_logits
                # Mean accuracy in percent; 196.0 = 1.96 * 100 gives the 95%
                # confidence half-width in percentage points.
                accuracy = np.array(accuracies).mean() * 100.0
                accuracy_confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
                self.logger.print_and_log('{0:}: {1:3.1f}+/-{2:2.1f}'.format(item, accuracy, accuracy_confidence))
    def _test_transfer_learning(self, path):
        """Evaluate the checkpoint at ``path`` on a fixed list of transfer
        datasets, one task each, using a 1000-image context set.

        sun397 is read from an on-disk image folder (its tensorflow-datasets
        reader is broken); every other dataset goes through TfDatasetReader.
        Accuracy per dataset is logged, not returned.
        """
        self.logger.print_and_log("") # add a blank line
        self.logger.print_and_log('Testing model {0:}: '.format(path))
        self.model = self.init_model()
        if path != 'None':
            self.model.load_state_dict(torch.load(path))
        context_set_size = 1000
        # Each entry: dataset name, optional sub-task, and an on/off switch.
        datasets = [
            {'name': "caltech101", 'task': None, 'enabled': True},
            {'name': "cifar100", 'task': None, 'enabled': True},
            {'name': "oxford_flowers102", 'task': None, 'enabled': True},
            {'name': "oxford_iiit_pet", 'task': None, 'enabled': True},
            {'name': "sun397", 'task': None, 'enabled': True},
            {'name': "svhn_cropped", 'task': None, 'enabled': True},
            {'name': "eurosat", 'task': None, 'enabled': True},
            {'name': "resisc45", 'task': None, 'enabled': True},
            {'name': "patch_camelyon", 'task': None, 'enabled': True},
            {'name': "diabetic_retinopathy_detection", 'task': None, 'enabled': True},
            {'name': "clevr", 'task': "count", 'enabled': True},
            {'name': "clevr", 'task': "distance", 'enabled': True},
            {'name': "dsprites", 'task': "location", 'enabled': True},
            {'name': "dsprites", 'task': "orientation", 'enabled': True},
            {'name': "smallnorb", 'task': "azimuth", 'enabled': True},
            {'name': "smallnorb", 'task': "elevation", 'enabled': True},
            {'name': "dmlab", 'task': None, 'enabled': True},
            {'name': "kitti", 'task': None, 'enabled': True},
        ]
        with torch.no_grad():
            for dataset in datasets:
                if dataset['enabled'] is False:
                    continue
                if dataset['name'] == "sun397": # use the image folder reader as the tf reader is broken for sun397
                    dataset_reader = ImageFolderReader(
                        path_to_images=self.args.download_path_for_sun397_dataset,
                        context_batch_size=context_set_size,
                        target_batch_size=self.args.batch_size,
                        image_size=self.args.image_size,
                        device=self.device)
                else: # use the tensorflow dataset reader
                    dataset_reader = TfDatasetReader(
                        dataset=dataset['name'],
                        task=dataset['task'],
                        context_batch_size=context_set_size,
                        target_batch_size=self.args.batch_size,
                        path_to_datasets=self.args.download_path_for_tensorflow_datasets,
                        image_size=self.args.image_size,
                        device=self.device
                    )
                # Adapt to the context set once, then stream target batches.
                context_images, context_labels = dataset_reader.get_context_batch()
                self.model.build_task_representation_by_batch(context_images)
                context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                self.model.configure_classifier(context_features, context_labels)
                test_set_size = dataset_reader.get_target_dataset_length()
                num_batches = self._get_number_of_batches(test_set_size)
                target_logits = []
                target_labels = []
                for batch in range(num_batches):
                    batch_target_images, batch_target_labels = dataset_reader.get_target_batch()
                    batch_logits = self.model.predict(batch_target_images, MetaLearningState.META_TEST)
                    target_logits.append(batch_logits)
                    target_labels.append(batch_target_labels)
                target_logits = torch.vstack(target_logits)
                target_labels = torch.hstack(target_labels)
                target_accuracy = self.accuracy_fn(target_logits, target_labels)
                del target_logits
                accuracy = target_accuracy * 100.0
                if dataset['task'] is None:
                    self.logger.print_and_log('{0:}: {1:3.1f}'.format(dataset['name'], accuracy))
                else:
                    self.logger.print_and_log('{0:} {1:}: {2:3.1f}'.format(dataset['name'], dataset['task'], accuracy))
def _compute_features_by_batch(self, images, meta_learning_state):
features = []
num_images = images.size(0)
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
return torch.vstack(features)
    def _compute_features_with_split_batch(self, images, grad_indices, no_grad_indices, meta_learning_state):
        """Embed the context set, tracking gradients only for ``grad_indices``.

        The no-gradient features of the *entire* context set are computed once
        and memoised in ``self.feature_cache`` (presumably reset elsewhere per
        task — confirm). Returns features stacked with the ``grad_indices``
        rows first, followed by the ``no_grad_indices`` rows.
        """
        num_images = images.size(0)
        if self.feature_cache is None: # cache the part with no gradients
            features = []
            num_batches = self._get_number_of_batches(num_images)
            for batch in range(num_batches):
                batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
                torch.set_grad_enabled(False)
                features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
                                                                meta_learning_state))
            self.feature_cache = torch.vstack(features).to(self.device)
        # now select some random images for that will have gradients and process those
        embeddings = []
        if len(grad_indices) > 0:
            torch.set_grad_enabled(True)
            embeddings.append(self.model.get_context_features(images[grad_indices], meta_learning_state))
        # now add in the no_grad images
        # NOTE(review): extending with a 2-D tensor appends its rows as 1-D
        # tensors; vstack treats each as a row vector, so shapes line up.
        embeddings.extend(self.feature_cache[no_grad_indices])
        # NOTE(review): if grad_indices is empty, grad mode may be left
        # disabled here; train_lite re-enables it before the target pass —
        # confirm no other caller depends on the global grad state.
        return torch.vstack(embeddings)
def prepare_task(self, task_dict):
context_images_np, context_labels_np = task_dict['context_images'], task_dict['context_labels']
target_images_np, target_labels_np = task_dict['target_images'], task_dict['target_labels']
context_images_np = context_images_np.transpose([0, 3, 1, 2])
context_images_np, context_labels_np = shuffle(context_images_np, context_labels_np)
context_images = torch.from_numpy(context_images_np)
context_labels = torch.from_numpy(context_labels_np)
target_images_np = target_images_np.transpose([0, 3, 1, 2])
target_images_np, target_labels_np = shuffle(target_images_np, target_labels_np)
target_images = torch.from_numpy(target_images_np)
target_labels = torch.from_numpy(target_labels_np)
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels
def save_checkpoint(self, iteration):
torch.save({
'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_accuracy': self.validation_accuracies.get_current_best_accuracy_dict(),
}, os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.validation_accuracies.replace(checkpoint['best_accuracy'])
# Standard script guard: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 25,859 | 52.987474 | 126 | py |
LITE | LITE-main/src/mahalanonbis.py | import torch
"""
The code in this file is substantially based on the code for "Improved Few-Shot Visual Classification"
by Peyman Bateni, Raghav Goyal, Vaden Masrani1, Frank Wood, and Leonid Sigal
that can be found here: https://github.com/peymanbateni/simple-cnaps
"""
class MahalanobisPredictor:
    """Mahalanobis-distance classifier head (after Bateni et al., Simple CNAPS).

    Classifies target features by negated squared Mahalanobis distance to
    per-class means, with each class precision regularised toward a
    task-level covariance estimate.
    """
    def __init__(self):
        return

    def predict(self, target_features, class_means, class_precision_matrices):
        """Return logits of shape (num_targets, num_classes).

        Each logit is -1 times the squared Mahalanobis distance between a
        target feature vector and a class mean under that class's precision.
        """
        # grabbing the number of classes and query examples for easier use later in the function
        number_of_classes = class_means.size(0)
        number_of_targets = target_features.size(0)

        # Tile targets against means so all pairwise differences can be formed
        # in one tensor, then contract with the per-class precision matrices
        # and multiply by -1 to turn distances into logits.
        repeated_target = target_features.repeat(1, number_of_classes).view(-1, class_means.size(1))
        repeated_class_means = class_means.repeat(number_of_targets, 1)
        repeated_difference = (repeated_class_means - repeated_target)
        repeated_difference = repeated_difference.view(number_of_targets, number_of_classes,
                                                       repeated_difference.size(1)).permute(1, 0, 2)
        first_half = torch.matmul(repeated_difference, class_precision_matrices)
        logits = torch.mul(first_half, repeated_difference).sum(dim=2).transpose(1, 0) * -1
        return logits

    def compute_class_means_and_precisions(self, features, labels):
        """Compute per-class (means, precisions) for every class in ``labels``.

        The class covariance is shrunk toward the task covariance with a
        weight ``lambda_k_tau`` that grows with the class sample count; the
        identity is added for numerical stability before inversion.
        """
        means = []
        precisions = []
        task_covariance_estimate = self._estimate_cov(features)
        for c in torch.unique(labels):
            # filter out feature vectors which have class c
            class_features = torch.index_select(features, 0, self._extract_class_indices(labels, c))
            # mean pooling examples to form class means
            means.append(self._mean_pooling(class_features).squeeze())
            lambda_k_tau = (class_features.size(0) / (class_features.size(0) + 1))
            # Fix: build the identity on the features' own device instead of
            # hard-coding cuda:0, so CPU and multi-GPU execution work too.
            precisions.append(torch.inverse(
                (lambda_k_tau * self._estimate_cov(class_features))
                + ((1 - lambda_k_tau) * task_covariance_estimate)
                + torch.eye(class_features.size(1), device=features.device)))

        means = (torch.stack(means))
        precisions = (torch.stack(precisions))
        return means, precisions

    @staticmethod
    def _estimate_cov(examples, rowvar=False, inplace=False):
        """
        SCM: function based on the suggested implementation of Modar Tensai
        and his answer as noted in: https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/5
        Estimate a covariance matrix given data.

        Args:
            examples: A 1-D or 2-D array containing multiple variables and observations.
                Each row of `m` represents a variable, and each column a single
                observation of all those variables.
            rowvar: If `rowvar` is True, then each row represents a
                variable, with observations in the columns. Otherwise, the
                relationship is transposed: each column represents a variable,
                while the rows contain observations.
            inplace: If True, mean-centre `examples` in place (mutates the input).

        Returns:
            The covariance matrix of the variables.
        """
        if examples.dim() > 2:
            raise ValueError('m has more than 2 dimensions')
        if examples.dim() < 2:
            examples = examples.view(1, -1)
        if not rowvar and examples.size(0) != 1:
            examples = examples.t()
        # Unbiased estimator: divide by (num_observations - 1).
        factor = 1.0 / (examples.size(1) - 1)
        if inplace:
            examples -= torch.mean(examples, dim=1, keepdim=True)
        else:
            examples = examples - torch.mean(examples, dim=1, keepdim=True)
        examples_t = examples.t()
        return factor * examples.matmul(examples_t).squeeze()

    @staticmethod
    def _extract_class_indices(labels, which_class):
        """Return a 1-D tensor of the positions in ``labels`` equal to ``which_class``."""
        class_mask = torch.eq(labels, which_class)  # binary mask of labels equal to which_class
        class_mask_indices = torch.nonzero(class_mask)  # indices of labels equal to which class
        return torch.reshape(class_mask_indices, (-1,))  # reshape to be a 1D vector

    @staticmethod
    def _mean_pooling(x):
        """Mean over the example (first) dimension, keeping a leading axis."""
        return torch.mean(x, dim=0, keepdim=True)
| 4,701 | 46.02 | 118 | py |
LITE | LITE-main/src/image_folder_reader.py | import torch
from torchvision.datasets import ImageFolder
import torchvision.transforms as T
import numpy as np
class ImageFolderReader:
    """Reads context and target batches from an on-disk image folder dataset.

    Used for sun397, whose tensorflow-datasets reader is broken. The folder
    is split into train/val/test subsets with a fixed generator seed so the
    split is reproducible across runs; context batches come from the train
    split, target batches from the test split.
    """
    def __init__(self, path_to_images, context_batch_size, target_batch_size, image_size, device,
                 train_fraction=0.7, val_fraction=0.1, test=0.2):
        # NOTE: `test` is kept for interface compatibility but unused — the
        # test fraction is derived as the remainder of the other two.
        self.device = device
        self.path_to_images = path_to_images
        self.context_batch_size = context_batch_size
        transforms = T.Compose([
            T.Resize((image_size, image_size)),
            T.ToTensor(),
            T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # normalize to -1 to 1
        ])
        data = ImageFolder(root=path_to_images, transform=transforms)
        dataset_length = len(data)
        train_size = int(round(train_fraction * dataset_length))
        val_size = int(round(val_fraction * dataset_length))
        self.test_size = dataset_length - train_size - val_size
        # Fixed seed keeps the split identical across runs.
        train_set, val_set, test_set = torch.utils.data.random_split(
            data, [train_size, val_size, self.test_size],
            generator=torch.Generator().manual_seed(15))
        self.context_iterator = iter(torch.utils.data.DataLoader(
            dataset=train_set,
            batch_size=1,
            shuffle=True,
            num_workers=4))
        self.target_iterator = iter(torch.utils.data.DataLoader(
            dataset=test_set,
            batch_size=target_batch_size,
            shuffle=False,
            num_workers=4
        ))

    def get_target_dataset_length(self):
        """Number of images in the held-out target (test) split."""
        return self.test_size

    def get_context_batch(self):
        """Return a context batch of images/labels covering every class."""
        return self._get_sun397_context_batch(self.context_iterator)

    def get_target_batch(self):
        """Return the next target batch: images and LongTensor labels."""
        return self._get_batch(self.target_iterator, is_target=True)

    def _get_batch(self, iterator, is_target):
        """Pull one batch from `iterator` and move it to `self.device`.

        Target labels are cast to LongTensor for classification losses;
        context labels keep their original dtype.
        """
        # Fix: use the builtin next() — DataLoader iterators no longer expose
        # a .next() method on current PyTorch versions.
        images, labels = next(iterator)
        # move the images and labels to the device
        images = images.to(self.device)
        if is_target:
            labels = labels.type(torch.LongTensor).to(self.device)
        else:
            labels = labels.to(self.device)
        return images, labels

    def _get_sun397_context_batch(self, iterator):
        """Draw a 1000-image context set that contains at least one example of
        each of sun397's 397 classes (best effort, capped at 10000 draws).

        This code is slow and hacky, but assures we get a context set
        of the correct size with at least one example per class.
        """
        images = []
        labels = []
        # Fix: the np.int alias was removed from NumPy; use plain int.
        label_counts = np.zeros(397, dtype=int)
        count = 0
        while True:
            image, label = next(iterator)
            index = label.cpu().numpy()
            if label_counts[index] < 2:  # keep at most two per class in this phase
                images.append(image)
                labels.append(label)
                label_counts[index] += 1
            all_labels = torch.hstack(labels)
            count += 1
            if len(torch.unique(all_labels)) == 397 or count == 10000:
                break
        # Top the batch up to the full context size with unconstrained draws.
        current_count = len(labels)
        to_get = 1000 - current_count
        for _ in range(to_get):
            image, label = next(iterator)
            images.append(image)
            labels.append(label)
        images = torch.vstack(images)
        labels = torch.hstack(labels)
        images = images.to(self.device)
        labels = labels.to(self.device)
        return images, labels
| 3,305 | 34.170213 | 114 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/main_molecules_graph_regression.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose entries are also reachable as attributes: d.key == d['key']."""

    def __init__(self, **kwds):
        # Populate the mapping, then alias the attribute namespace to the dict
        # itself so item access and attribute access stay in sync both ways.
        super().__init__(**kwds)
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.molecules_graph_regression.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device visibility to `gpu_id` and return the torch.device to use.

    Falls back to CPU when CUDA is unavailable or `use_gpu` is falsy.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    use_cuda = torch.cuda.is_available() and use_gpu
    if use_cuda:
        print('cuda available with GPU:',torch.cuda.get_device_name(0))
    else:
        print('cuda not available')
    return torch.device("cuda" if use_cuda else "cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Build the network named MODEL_NAME with net_params, print and return
    its total number of parameter elements."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    #print(model)
    total_param = sum(np.prod(list(weight.data.size())) for weight in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train MODEL_NAME on `dataset` (molecule graph regression) with
    per-epoch validation/test evaluation, TensorBoard logging, rolling
    checkpoints, LR scheduling, and a final results file.

    `dirs` unpacks as (root_log_dir, root_ckpt_dir, write_file_name,
    write_config_file).
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    # Architecture-specific dataset preprocessing.
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    if re.search('GatedGCN.+',MODEL_NAME) or re.search('bi.+',MODEL_NAME):
        if net_params['pos_enc']:
            print("[!] Adding graph positional encoding.")
            dataset._add_positional_encodings(net_params['pos_enc_dim'])
            print('Time PE:',time.time()-t0)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_MAEs, epoch_val_MAEs = [], []
    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WLGNNs
        from train.train_molecules_graph_regression import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        from functools import partial # util function to pass edge_feat to collate function
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
        val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
        test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
    else:
        # import train functions for all other GNNs
        if re.search('.+for_Eval',MODEL_NAME):
            from train.train_molecules_graph_regression_for_Eval import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        else:
            from train.train_molecules_graph_regression import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
        val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
        test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for RingGNN
                    epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                else:   # for all other models common train function
                    epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['mode'])
                # Some evaluate_network variants return (loss, mae), others
                # (loss, mae, hidden features); retry with 3-way unpack on
                # failure. NOTE(review): the bare except also swallows any
                # unrelated error raised during evaluation — confirm intended.
                try:
                    epoch_val_loss, epoch_val_mae = evaluate_network(model, device, val_loader, epoch, params['mode'])
                    _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch, params['mode'])
                except:
                    epoch_val_loss, epoch_val_mae, _ = evaluate_network(model, device, val_loader, epoch, params['mode'])
                    _, epoch_test_mae, _ = evaluate_network(model, device, test_loader, epoch, params['mode'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_MAEs.append(epoch_train_mae)
                epoch_val_MAEs.append(epoch_val_mae)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_mae', epoch_train_mae, epoch)
                writer.add_scalar('val/_mae', epoch_val_mae, epoch)
                writer.add_scalar('test/_mae', epoch_test_mae, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_MAE=epoch_train_mae, val_MAE=epoch_val_mae,
                              test_MAE=epoch_test_mae)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Keep only the two most recent epoch checkpoints on disk.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last model state.
    # NOTE(review): `epoch`, `ckpt_dir` and (in the except arm) `hs` are only
    # bound if at least one epoch ran; an interrupt before the first epoch
    # completes would raise NameError below — confirm whether that case matters.
    try:
        _, test_mae = evaluate_network(model, device, test_loader, epoch, params['mode'])
        _, train_mae = evaluate_network(model, device, train_loader, epoch, params['mode'])
    except:
        _, test_mae, hs = evaluate_network(model, device, test_loader, epoch, params['mode'])
        _, train_mae, _ = evaluate_network(model, device, train_loader, epoch, params['mode'])
        torch.save(hs,'{}.pt'.format(ckpt_dir + "/features"))
    print("Test MAE: {:.4f}".format(test_mae))
    print("Train MAE: {:.4f}".format(train_mae))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST MAE: {:.4f}\nTRAIN MAE: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_mae, train_mae, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
def main():
    """Parse CLI flags, merge them over the --config JSON file, and launch
    `train_val_pipeline`.

    Every flag is optional; when supplied it overrides the matching entry
    loaded from the config. Sections: device, model/dataset/out_dir, training
    `params`, network `net_params`, then derived/dataset-specific settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    parser.add_argument('--mode', help="Please give a value for mode")
    # NOTE(review): --sigma is parsed but never copied into params/net_params
    # below — confirm whether it is meant to be applied.
    parser.add_argument('--sigma', help="Please give a value for sigma")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    dataset = LoadData(DATASET_NAME)
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    # parameters
    params = config['params']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.mode is not None:
        params['mode'] = str(args.mode)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # ZINC
    net_params['num_atom_type'] = dataset.num_atom_type
    net_params['num_bond_type'] = dataset.num_bond_type
    if re.search('bi.+',MODEL_NAME):
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio'])
    if MODEL_NAME == 'RingGNN':
        # RingGNN sizes itself from the average node count over training graphs.
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # Per-run output locations, stamped with GPU id and wall-clock time.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# NOTE(review): invoked unconditionally (no `if __name__ == "__main__"` guard),
# so importing this module starts training immediately — confirm intended.
main()
| 19,964 | 41.478723 | 202 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/main_TSP_edge_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose entries are also readable/writable as attributes."""

    def __init__(self, **entries):
        # Fill the mapping, then alias the attribute table to the dict
        # itself so `d.key` and `d['key']` share the same storage.
        dict.__init__(self, **entries)
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.TSP_edge_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Return the torch device to train on, pinning CUDA to `gpu_id`.

    The CUDA env vars are always exported so the process only ever sees
    the requested GPU; a CPU device is returned when CUDA is unavailable
    or `use_gpu` is falsy.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    cuda_ok = torch.cuda.is_available() and use_gpu
    if cuda_ok:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
    else:
        print('cuda not available')
    return torch.device("cuda" if cuda_ok else "cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate `MODEL_NAME` with `net_params`, print it, and return the
    total number of learnable parameters."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    print(model)
    n_weights = sum(np.prod(list(p.data.size())) for p in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, n_weights)
    return n_weights
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train `MODEL_NAME` on the TSP edge-classification dataset.

    Runs the full epoch loop with per-epoch val/test evaluation, TensorBoard
    logging, checkpointing (keeping only the two most recent), LR scheduling
    on val loss, and a wall-clock budget, then writes a results summary.

    Args:
        MODEL_NAME: model key understood by `gnn_model` (e.g. 'RingGNN').
        dataset: object exposing `.train`/`.val`/`.test` splits, a `.name`,
            and the collate functions used below.
        params: optimisation hyper-parameters (seed, lr, schedule, epochs,
            batch_size, mode, min_lr, max_time in hours, ...).
        net_params: architecture hyper-parameters; must include 'device',
            'n_classes', 'edge_feat' and 'total_param'.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    #assert net_params['self_loop'] == False, "No self-loop support for %s dataset" % DATASET_NAME
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # LR is reduced when the validation loss plateaus (see scheduler.step below).
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_f1s, epoch_val_f1s = [], []
    # Dense WL-GNNs use their own train/eval functions and unbatched loaders
    # (one dense graph tensor at a time); all other models use sparse batching.
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WL-GNNs
        from train.train_TSP_edge_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        from functools import partial # util function to pass edge_feat to collate function
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
        val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
        test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
    else:
        # import train functions for all other GCNs
        from train.train_TSP_edge_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
        test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                else:   # for all other models common train function
                    epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['mode'])
                epoch_val_loss, epoch_val_f1 = evaluate_network(model, device, val_loader, epoch, params['mode'])
                _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch, params['mode'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_f1s.append(epoch_train_f1)
                epoch_val_f1s.append(epoch_val_f1)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_f1', epoch_train_f1, epoch)
                writer.add_scalar('val/_f1', epoch_val_f1, epoch)
                writer.add_scalar('test/_f1', epoch_test_f1, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_f1=epoch_train_f1, val_f1=epoch_val_f1,
                              test_f1=epoch_test_f1)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Prune old checkpoints: only epochs >= epoch-1 are kept.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final metrics use the LAST-epoch weights (not a best-val checkpoint).
    # NOTE(review): `epoch` is unbound if an interrupt fires before the first
    # epoch completes, which would raise NameError here — confirm acceptable.
    _, test_f1 = evaluate_network(model, device, test_loader, epoch, params['mode'])
    _, train_f1 = evaluate_network(model, device, train_loader, epoch, params['mode'])
    print("Test F1: {:.4f}".format(test_f1))
    print("Train F1: {:.4f}".format(train_f1))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST F1: {:.4f}\nTRAIN F1: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
def main():
    """Entry point for TSP edge classification.

    Parses CLI flags, merges them over the JSON config (CLI always wins),
    derives data-dependent network parameters, and launches training.
    The only required flag is --config.

    BUG FIX: the original assigned `net_params['layer_type'] = layer_type`,
    referencing an undefined name, so passing --layer_type raised NameError;
    the parsed CLI value is now used.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--layer_type', help="Please give a value for layer_type (for GAT and GatedGCN only)")
    parser.add_argument('--mode', help="Please give a value for mode")
    parser.add_argument('--sigma', help="Please give a value for sigma")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device: a CLI gpu_id both selects the GPU and forces GPU use on
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir: CLI overrides config
    MODEL_NAME = args.model if args.model is not None else config['model']
    DATASET_NAME = args.dataset if args.dataset is not None else config['dataset']
    dataset = LoadData(DATASET_NAME)
    out_dir = args.out_dir if args.out_dir is not None else config['out_dir']
    # optimisation hyper-parameters: CLI string values override the config,
    # cast to the appropriate type
    params = config['params']
    param_casts = {
        'seed': int, 'epochs': int, 'batch_size': int, 'init_lr': float,
        'lr_reduce_factor': float, 'lr_schedule_patience': int,
        'min_lr': float, 'mode': str, 'weight_decay': float,
        'print_epoch_interval': int, 'max_time': float,
    }
    for name, cast in param_casts.items():
        value = getattr(args, name)
        if value is not None:
            params[name] = cast(value)
    # network hyper-parameters
    def as_bool(v):
        # CLI booleans arrive as the strings 'True'/'False'
        return v == 'True'
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    net_param_casts = {
        'L': int, 'hidden_dim': int, 'out_dim': int, 'residual': as_bool,
        'edge_feat': as_bool, 'readout': str, 'kernel': int, 'n_heads': int,
        'gated': as_bool, 'in_feat_dropout': float, 'dropout': float,
        'layer_norm': as_bool, 'batch_norm': as_bool, 'sage_aggregator': str,
        'data_mode': str, 'num_pool': int, 'gnn_per_block': int,
        'embedding_dim': int, 'pool_ratio': float, 'linkpred': as_bool,
        'cat': as_bool, 'self_loop': as_bool,
        'layer_type': str,  # was the NameError described in the docstring
    }
    for name, cast in net_param_casts.items():
        value = getattr(args, name)
        if value is not None:
            net_params[name] = cast(value)
    # TSP: input node/edge feature sizes and class count come from the data
    net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].shape[0]
    net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
    num_classes = len(np.unique(np.concatenate(dataset.train[:][1])))
    net_params['n_classes'] = num_classes
    if re.search('bi.+',MODEL_NAME):
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio'])
    if MODEL_NAME == 'RingGNN':
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # Output locations share a single timestamp so all artifacts of one run
    # match even if the clock ticks over a second between path constructions
    # (the original called time.strftime once per path).
    run_tag = MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_log_dir = out_dir + 'logs/' + run_tag
    root_ckpt_dir = out_dir + 'checkpoints/' + run_tag
    write_file_name = out_dir + 'results/result_' + run_tag
    write_config_file = out_dir + 'configs/config_' + run_tag
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Script entry point: training starts immediately when this file is executed.
main()
| 18,702 | 39.836245 | 202 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/main_TUs_graph_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """dict subclass exposing its items via attribute access (``d.key``)."""

    def __init__(self, **kwds):
        super().__init__(kwds)
        # Point the attribute namespace at the dict itself so attribute
        # and item access stay in sync.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.TUs_graph_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Select the training device, restricting CUDA visibility to `gpu_id`."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # Guard clause: fall back to CPU when CUDA is missing or disabled.
    if not (torch.cuda.is_available() and use_gpu):
        print('cuda not available')
        return torch.device("cpu")
    print('cuda available with GPU:', torch.cuda.get_device_name(0))
    return torch.device("cuda")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Construct the model once and return its learnable-parameter count.

    The model object itself is intentionally not printed (matches the
    commented-out print in the original).
    """
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    total = 0
    for weight in model.parameters():
        total += np.prod(list(weight.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total)
    return total
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
    """Run 10-fold train/val/test for `MODEL_NAME` on a TU dataset.

    For each of the 10 pre-defined splits: reseed, rebuild the model and
    optimizer, run the epoch loop with TensorBoard logging, checkpointing
    (keeping only the two most recent), val-loss LR scheduling and a
    per-split wall-clock budget; finally report mean/std accuracy over
    the splits and write a results summary file.

    Args:
        MODEL_NAME: model key understood by `gnn_model` (e.g. 'GCN', 'GAT').
        DATASET_NAME: name passed to `LoadData` (the dataset is loaded here).
        params: optimisation hyper-parameters (seed, lr, schedule, epochs,
            batch_size, mode, min_lr, max_time in hours, ...).
        net_params: architecture hyper-parameters; must include 'device',
            'n_classes', 'self_loop' and 'total_param'.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    avg_test_acc = []
    avg_train_acc = []
    avg_convergence_epochs = []
    t0 = time.time()
    per_epoch_time = []
    dataset = LoadData(DATASET_NAME)
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        for split_number in range(10):
            t0_split = time.time()
            log_dir = os.path.join(root_log_dir, "RUN_" + str(split_number))
            writer = SummaryWriter(log_dir=log_dir)
            # setting seeds
            # (the same seed is reused for every split so runs are comparable)
            random.seed(params['seed'])
            np.random.seed(params['seed'])
            torch.manual_seed(params['seed'])
            if device.type == 'cuda':
                torch.cuda.manual_seed(params['seed'])
            print("RUN NUMBER: ", split_number)
            trainset, valset, testset = dataset.train[split_number], dataset.val[split_number], dataset.test[split_number]
            print("Training Graphs: ", len(trainset))
            print("Validation Graphs: ", len(valset))
            print("Test Graphs: ", len(testset))
            print("Number of Classes: ", net_params['n_classes'])
            # A fresh model/optimizer/scheduler is built per split.
            model = gnn_model(MODEL_NAME, net_params)
            model = model.to(device)
            optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                             factor=params['lr_reduce_factor'],
                                                             patience=params['lr_schedule_patience'],
                                                             verbose=True)
            epoch_train_losses, epoch_val_losses = [], []
            epoch_train_accs, epoch_val_accs = [], []
            # batching exception for Diffpool
            drop_last = True if MODEL_NAME == 'DiffPool' else False
            if MODEL_NAME in ['RingGNN', '3WLGNN']:
                # import train functions specific for WL-GNNs
                from train.train_TUs_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
                train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
                val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
                test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
            else:
                # import train functions for all other GCNs
                from train.train_TUs_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
                train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
                val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
                test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
            with tqdm(range(params['epochs'])) as t:
                for epoch in t:
                    t.set_description('Epoch %d' % epoch)
                    start = time.time()
                    if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                    else:   # for all other models common train function
                        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['mode'])
                    epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch, params['mode'])
                    _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
                    epoch_train_losses.append(epoch_train_loss)
                    epoch_val_losses.append(epoch_val_loss)
                    epoch_train_accs.append(epoch_train_acc)
                    epoch_val_accs.append(epoch_val_acc)
                    writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                    writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                    writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                    writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                    writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                    writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                    # NOTE(review): this second test-set evaluation repeats the
                    # one above only to refresh `epoch_test_acc` for the
                    # progress bar — it doubles test-eval cost per epoch.
                    _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
                    t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                                  train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                                  train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                                  test_acc=epoch_test_acc)
                    per_epoch_time.append(time.time()-start)
                    # Saving checkpoint
                    ckpt_dir = os.path.join(root_ckpt_dir, "RUN_" + str(split_number))
                    if not os.path.exists(ckpt_dir):
                        os.makedirs(ckpt_dir)
                    torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                    # Prune old checkpoints: only epochs >= epoch-1 are kept.
                    files = glob.glob(ckpt_dir + '/*.pkl')
                    for file in files:
                        epoch_nb = file.split('_')[-1]
                        epoch_nb = int(epoch_nb.split('.')[0])
                        if epoch_nb < epoch-1:
                            os.remove(file)
                    scheduler.step(epoch_val_loss)
                    if optimizer.param_groups[0]['lr'] < params['min_lr']:
                        print("\n!! LR EQUAL TO MIN LR SET.")
                        break
                    # Stop training after params['max_time'] hours
                    if time.time()-t0_split > params['max_time']*3600/10: # Dividing max_time by 10, since there are 10 runs in TUs
                        print('-' * 89)
                        print("Max_time for one train-val-test split experiment elapsed {:.3f} hours, so stopping".format(params['max_time']/10))
                        break
            # Per-split final metrics use the LAST-epoch weights.
            _, test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
            _, train_acc = evaluate_network(model, device, train_loader, epoch, params['mode'])
            avg_test_acc.append(test_acc)
            avg_train_acc.append(train_acc)
            avg_convergence_epochs.append(epoch)
            print("Test Accuracy [LAST EPOCH]: {:.4f}".format(test_acc))
            print("Train Accuracy [LAST EPOCH]: {:.4f}".format(train_acc))
            print("Convergence Time (Epochs): {:.4f}".format(epoch))
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    print("TOTAL TIME TAKEN: {:.4f}hrs".format((time.time()-t0)/3600))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    print("AVG CONVERGENCE Time (Epochs): {:.4f}".format(np.mean(np.array(avg_convergence_epochs))))
    # Final test accuracy value averaged over 10-fold
    print("""\n\n\nFINAL RESULTS\n\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))
    print("\nAll splits Test Accuracies:\n", avg_test_acc)
    print("""\n\n\nFINAL RESULTS\n\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100))
    print("\nAll splits Train Accuracies:\n", avg_train_acc)
    # NOTE(review): `writer` and `model` are only bound if at least one split
    # started; a KeyboardInterrupt before then would raise NameError below.
    writer.close()
    """
        Write the results in out/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\n\n
    Average Convergence Time (Epochs): {:.4f} with s.d. {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
                  np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,
                  np.mean(avg_convergence_epochs), np.std(avg_convergence_epochs),
                  (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))
def main():
    """Entry point for TU graph classification: merge CLI flags over the
    JSON config (CLI always wins), derive data-dependent network
    parameters, and launch the 10-fold training pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--mode', help="Please give a value for mode")
    parser.add_argument('--sigma', help="Please give a value for sigma")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device: a CLI gpu_id both selects the GPU and forces GPU use on
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir: CLI overrides config
    MODEL_NAME = args.model if args.model is not None else config['model']
    DATASET_NAME = args.dataset if args.dataset is not None else config['dataset']
    dataset = LoadData(DATASET_NAME)
    out_dir = args.out_dir if args.out_dir is not None else config['out_dir']
    # optimisation hyper-parameters: CLI string values override the config,
    # cast to the appropriate type
    def as_bool(v):
        # CLI booleans arrive as the strings 'True'/'False'
        return v == 'True'
    params = config['params']
    param_casts = {
        'seed': int, 'epochs': int, 'batch_size': int, 'init_lr': float,
        'lr_reduce_factor': float, 'lr_schedule_patience': int,
        'min_lr': float, 'mode': str, 'weight_decay': float,
        'print_epoch_interval': int, 'max_time': float,
    }
    for name, cast in param_casts.items():
        value = getattr(args, name)
        if value is not None:
            params[name] = cast(value)
    # network hyper-parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    net_param_casts = {
        'L': int, 'hidden_dim': int, 'out_dim': int, 'residual': as_bool,
        'edge_feat': as_bool, 'readout': str, 'kernel': int, 'n_heads': int,
        'gated': as_bool, 'in_feat_dropout': float, 'dropout': float,
        'layer_norm': as_bool, 'batch_norm': as_bool, 'sage_aggregator': str,
        'data_mode': str, 'num_pool': int, 'gnn_per_block': int,
        'embedding_dim': int, 'pool_ratio': float, 'linkpred': as_bool,
        'cat': as_bool, 'self_loop': as_bool,
    }
    for name, cast in net_param_casts.items():
        value = getattr(args, name)
        if value is not None:
            net_params[name] = cast(value)
    # TUs: node feature size and class count come from the data itself
    net_params['in_dim'] = dataset.all.graph_lists[0].ndata['feat'][0].shape[0]
    num_classes = len(np.unique(dataset.all.graph_labels))
    net_params['n_classes'] = num_classes
    if re.search('bi.+',MODEL_NAME):
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio'])
    if MODEL_NAME == 'RingGNN':
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # output locations, tagged with model/dataset/GPU/timestamp
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs)
# Script entry point: training starts immediately when this file is executed.
main()
| 20,561 | 43.029979 | 202 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/main_superpixels_graph_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose items are also attributes: ``d.key`` is ``d['key']``.

    Achieved by aliasing the instance ``__dict__`` to the dict itself, so
    attribute and item access share one storage.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device ordering/visibility and return the torch device to use.

    Returns a CUDA device only when CUDA is available *and* `use_gpu` is set;
    otherwise falls back to CPU. Also exports the CUDA env vars so later CUDA
    initialisation sees only `gpu_id`.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once, print and return its total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    #print(model)
    # Sum the element counts of every parameter tensor.
    total_param = 0
    for param in model.parameters():
        total_param += np.prod(list(param.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train/validate/test `MODEL_NAME` on a superpixel graph-classification dataset.

    Logs per-epoch metrics to TensorBoard, checkpoints every epoch (keeping the
    two most recent), reduces the LR on validation-loss plateaus, and writes
    final results/config text files under the paths in `dirs`.
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    # GCN/GAT require explicit self-loops (the "central node trick").
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds (python, numpy, torch CPU and — if used — CUDA)
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # LR decays when the validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WL-GNNs (dense batching, batch size 1)
        from train.train_superpixels_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
        val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
        test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
    else:
        # import train functions for all other GCNs (sparse/batched DGL graphs)
        from train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
        val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
        test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                else: # for all other models common train function
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['mode'])
                epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch, params['mode'])
                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                # TensorBoard scalars for this epoch
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Prune old checkpoints, keeping only the two most recent epochs.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation on the last model state.
    # NOTE(review): `epoch` is referenced below; an interrupt before the first
    # epoch completes would leave it undefined — confirm this is acceptable.
    _, test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
    _, train_acc = evaluate_network(model, device, train_loader, epoch, params['mode'])
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
def main():
    """CLI entry point for superpixel graph classification.

    Parses command-line flags, overlays them on the JSON config file, derives
    dataset-dependent net params (input dims, class count, model-specific
    sizes), prepares output directories, then launches `train_val_pipeline`.
    """
    """
        USER CONTROLS
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--sigma', help="Please give a value for sigma")
    parser.add_argument('--mode', help="Please give a value for mode")
    args = parser.parse_args()
    # Base configuration comes from the JSON file; CLI flags override it below.
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    dataset = LoadData(DATASET_NAME)
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    # parameters (training hyper-parameters; each flag overrides the config value)
    params = config['params']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.mode is not None:
        params['mode'] = str(args.mode)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters (architecture hyper-parameters; same override scheme)
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    # Superpixels: node/edge input dims and class count derived from the data.
    net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)
    net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
    num_classes = len(np.unique(np.array(dataset.train[:][1])))
    net_params['n_classes'] = num_classes
    if re.search('bi.+',MODEL_NAME):
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        max_num_nodes_train = max([dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))])
        max_num_nodes_test = max([dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))])
        max_num_node = max(max_num_nodes_train, max_num_nodes_test)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio'])
    if MODEL_NAME == 'RingGNN':
        num_nodes_train = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        num_nodes_test = [dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))]
        num_nodes = num_nodes_train + num_nodes_test
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # Time-stamped output paths for logs/checkpoints/results/configs.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Script entry: run the CLI driver when this file is executed.
main()
| 18,866 | 41.113839 | 202 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/main_SBMs_node_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose items are also attributes: ``d.key`` is ``d['key']``.

    Achieved by aliasing the instance ``__dict__`` to the dict itself, so
    attribute and item access share one storage.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.SBMs_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device ordering/visibility and return the torch device to use.

    Returns a CUDA device only when CUDA is available *and* `use_gpu` is set;
    otherwise falls back to CPU. Also exports the CUDA env vars so later CUDA
    initialisation sees only `gpu_id`.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once, print and return its total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    #print(model)
    # Sum the element counts of every parameter tensor.
    total_param = 0
    for param in model.parameters():
        total_param += np.prod(list(param.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train/validate/test `MODEL_NAME` on an SBM node-classification dataset.

    Adds self-loops or Laplacian positional encodings where the model requires
    them, logs per-epoch metrics to TensorBoard, checkpoints every epoch
    (keeping the two most recent), reduces the LR on validation-loss plateaus,
    and writes the final results/config text files under the paths in `dirs`.
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    # GCN/GAT require explicit self-loops (the "central node trick").
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    # GatedGCN / bi-* / *for_Eval variants optionally use positional encodings.
    if MODEL_NAME in ['GatedGCN'] or re.search('bi.+',MODEL_NAME) or re.search('.+for_Eval',MODEL_NAME):
        if net_params['pos_enc']:
            print("[!] Adding graph positional encoding.")
            dataset._add_positional_encodings(net_params['pos_enc_dim'])
            print('Time PE:',time.time()-start0)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds (python, numpy, torch CPU and — if used — CUDA)
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # LR decays when the validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WL-GNNs (dense batching, batch size 1)
        from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
        val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
        test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
    else:
        # import train functions for all other GCNs
        if re.search('.+for_Eval',MODEL_NAME):
            # *_for_Eval variants also return hidden states from evaluation
            from train.train_SBMs_node_classification_for_Eval import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        else:
            from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
        test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                else: # for all other models common train function
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['mode'])
                # *_for_Eval evaluators return an extra hidden-state value.
                if re.search('.+for_Eval',MODEL_NAME):
                    epoch_val_loss, epoch_val_acc, _ = evaluate_network(model, device, val_loader, epoch, params['mode'])
                    _, epoch_test_acc, _ = evaluate_network(model, device, test_loader, epoch, params['mode'])
                else:
                    epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch, params['mode'])
                    _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                # TensorBoard scalars for this epoch
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Prune old checkpoints, keeping only the two most recent epochs.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation on the last model state.
    # NOTE(review): `epoch` is referenced below; an interrupt before the first
    # epoch completes would leave it undefined — confirm this is acceptable.
    if re.search('.+for_Eval',MODEL_NAME):
        _, test_acc, hs = evaluate_network(model, device, test_loader, epoch, params['mode'])
        _, train_acc, _ = evaluate_network(model, device, train_loader, epoch, params['mode'])
    else:
        _, test_acc = evaluate_network(model, device, test_loader, epoch, params['mode'])
        _, train_acc = evaluate_network(model, device, train_loader, epoch, params['mode'])
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """CLI entry point for SBM node classification.

    Parses command-line flags, overlays them on the JSON config file, derives
    dataset-dependent net params (input dim, class count, model-specific
    sizes), prepares output directories, then launches `train_val_pipeline`.
    """
    """
        USER CONTROLS
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    parser.add_argument('--mode', help="Please give a value for mode")
    parser.add_argument('--sigma', help="Please give a value for sigma")
    args = parser.parse_args()
    # Base configuration comes from the JSON file; CLI flags override it below.
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    dataset = LoadData(DATASET_NAME)
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    # parameters (training hyper-parameters; each flag overrides the config value)
    params = config['params']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.mode is not None:
        params['mode'] = str(args.mode)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters (architecture hyper-parameters; same override scheme)
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # SBM: input dim = number of distinct node-feature values; classes likewise.
    net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
    net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)
    if MODEL_NAME == 'RingGNN':
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    if re.search('bi.+',MODEL_NAME):
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio'])
    # Time-stamped output paths for logs/checkpoints/results/configs.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Script entry: run the CLI driver when this file is executed.
main()
| 19,778 | 40.552521 | 202 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/bi_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.bi_gcn_layer import biGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet(nn.Module):
    """GCN for SBM node classification with one bi-directional (pooling) layer.

    The second layer in the stack is a ``biGCNLayer`` which, besides updating
    node features, emits a soft cluster-assignment matrix ``s``; the training
    loop can regularise it via :meth:`unsup_loss`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Stack: GCN -> biGCN (pooling) -> (n_layers-3) x GCN -> GCN
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual) ])
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-node class logits, soft assignment matrix)."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GCN
        cnt=0
        for conv in self.layers:
            if cnt == 1:
                # layer 1 is the bi layer: also returns the soft assignment
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss on the soft assignment S.

        mode='mincut': -Tr(S^T A S)/Tr(S^T D S) plus an orthogonality term.
        mode='diffpool': assignment entropy plus a link-prediction term.
        Any other mode falls through and returns None.
        """
        if mode == 'mincut':
            # ctx= argument suggests an older DGL adjacency_matrix API — confirm against installed DGL
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # note: -0 == 0, so dim=(-0, -1) is simply dims (0, 1)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
| 4,566 | 34.96063 | 140 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/bi_gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer
from layers.bi_gated_gcn_layer import biGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGatedGCNNet(nn.Module):
    """GatedGCN for SBM node classification with one bi (pooling) layer.

    Layer index 1 is a ``biGatedGCNLayer`` that additionally emits a soft
    cluster-assignment matrix ``s`` returned by :meth:`forward` and
    regularised via :meth:`unsup_loss`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        # Stack: GatedGCN -> biGatedGCN (pooling) -> (n_layers-2) x GatedGCN
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, residual=self.residual))
        for _ in range(n_layers-2):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-node class logits, soft assignment matrix)."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        cnt = 0
        for conv in self.layers:
            if cnt == 1:
                # layer 1 is the bi layer: also returns the soft assignment
                h, e, self.s = conv(g, h, e)
            else:
                h, e = conv(g, h, e)
            cnt+=1
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss ('mincut' or 'diffpool'); other modes return None."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # note: -0 == 0, so dim=(-0, -1) is simply dims (0, 1)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
class biGatedGCNNet_IL(nn.Module):
    """GatedGCN for SBM node classification with interleaved bi (pooling) layers.

    Among layers 1..L-1, every layer with index l % 3 == 1 is a
    ``biGatedGCNLayer`` that additionally emits a soft cluster-assignment
    matrix; all assignments are stacked into ``self.S``
    (num_pool x num_nodes x assign_dim) and returned alongside the node
    logits so the caller can add :meth:`unsup_loss`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing soft assignments
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        for l in range(self.n_layers-1):
            if l % 3 == 1:
                self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, self.residual))
            else:
                self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-node class logits, stacked soft assignments self.S)."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        s = []
        for conv in self.layers:
            # Dispatch on the layer type: biGatedGCNLayer returns (h, e, assignment),
            # plain GatedGCNLayer returns (h, e). The previous bare try/except here
            # swallowed *any* exception raised inside the bi layer and then re-ran
            # the layer in the except branch, masking real errors and executing the
            # layer twice per step.
            if isinstance(conv, biGatedGCNLayer):
                h, e, tmp = conv(g, h, e)
                s.append(tmp)
            else:
                h, e = conv(g, h, e)
        self.S = torch.stack(s,dim=0).to(self.device) # num_pool x sum_node x assign_dim
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.S
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss averaged over all pooling layers.

        mode='mincut': normalized-cut + orthogonality terms per layer;
        mode='diffpool': assignment entropy + link-prediction terms per layer.
        Any other mode falls through and returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            mincut_loss, ortho_loss = 0, 0
            for l in range(soft_assign.shape[0]):
                one_s = soft_assign[l]
                out_adj = torch.mm(one_s.transpose(0,1),torch.sparse.mm(adj,one_s))
                out_d = torch.mm(one_s.transpose(0,1),torch.sparse.mm(d,one_s))
                mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
                mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
                mincut_loss += -(mincut_num / mincut_den)
                ss = torch.matmul(one_s.transpose(0, 1), one_s)
                i_s = torch.eye(one_s.shape[1]).type_as(ss)
                ortho_loss += torch.norm(
                    ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                    i_s / torch.norm(i_s), dim=(-0, -1))
            return ( mincut_loss + ortho_loss ) / soft_assign.shape[0]
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss, linkpred_loss = 0, 0
            for l in range(soft_assign.shape[0]):
                one_s = soft_assign[l]
                ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
                linkpred_loss += torch.add( -one_s.matmul(one_s.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return (ent_loss + linkpred_loss) / soft_assign.shape[0]
class biGatedGCNNet_ALL(nn.Module):
    """GatedGCN variant where *every* layer is a bi (pooling) layer.

    Each of the L ``biGatedGCNLayer``s emits a soft assignment; they are
    stacked into ``self.S`` (L x num_nodes x assign_dim) and returned with
    the node logits.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing soft assignments
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, self.residual ) for _ in range(self.n_layers) ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-node class logits, stacked soft assignments self.S)."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        s = []
        for conv in self.layers:
            h, e, tmp = conv(g, h, e)
            s.append(tmp)
        self.S = torch.stack(s,dim=0).to(self.device) # num_pool x sum_node x assign_dim
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.S
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss averaged over all self.n_layers pooling layers.

        mode='mincut' or 'diffpool'; any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            mincut_loss, ortho_loss = 0, 0
            for l in range(self.n_layers):
                one_s = soft_assign[l]
                out_adj = torch.mm(one_s.transpose(0,1),torch.sparse.mm(adj,one_s))
                out_d = torch.mm(one_s.transpose(0,1),torch.sparse.mm(d,one_s))
                mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
                mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
                mincut_loss += -(mincut_num / mincut_den)
                ss = torch.matmul(one_s.transpose(0, 1), one_s)
                i_s = torch.eye(one_s.shape[1]).type_as(ss)
                ortho_loss += torch.norm(
                    ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                    i_s / torch.norm(i_s), dim=(-0, -1))
            return ( mincut_loss + ortho_loss ) / self.n_layers
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss, linkpred_loss = 0, 0
            for l in range(self.n_layers):
                one_s = soft_assign[l]
                ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
                linkpred_loss += torch.add( -one_s.matmul(one_s.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return (ent_loss + linkpred_loss) / self.n_layers
class biGatedGCNNet_lazy(nn.Module):
    """GatedGCN with the bi (pooling) layer placed later in the stack.

    Unlike ``biGatedGCNNet`` (bi layer at index 1), here two plain
    ``GatedGCNLayer``s run first and the ``biGatedGCNLayer`` sits at index 2.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        # Stack: 2 x GatedGCN -> biGatedGCN (pooling) -> (n_layers-3) x GatedGCN
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual),
                                      GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, residual=self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-node class logits, soft assignment matrix)."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        cnt = 0
        for conv in self.layers:
            if cnt == 2:
                # layer 2 is the bi layer: also returns the soft assignment
                h, e, self.s = conv(g, h, e)
            else:
                h, e = conv(g, h, e)
            cnt+=1
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss ('mincut' or 'diffpool'); other modes return None."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # note: -0 == 0, so dim=(-0, -1) is simply dims (0, 1)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/gcn_net_for_Eval.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class GCNNet_for_Eval(nn.Module):
    """Plain GCN whose forward pass also exposes every intermediate node
    representation (stacked, for feature analysis) next to the logits."""
    def __init__(self, net_params):
        super().__init__()
        node_vocab = net_params['in_dim']      # node feat is an integer id
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        depth = net_params['L']
        self.sg_flag = False                   # this model emits no soft assignment
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(node_vocab, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # `depth` identical hidden->hidden GCN layers
        self.layers = nn.ModuleList(
            GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                     self.batch_norm, self.residual)
            for _ in range(depth))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Run the GCN stack; return (logits, stacked per-layer features)."""
        h = self.in_feat_dropout(self.embedding_h(h))
        trace = [h]                            # keep the input embedding as layer 0
        for layer in self.layers:
            h = layer(g, h)
            trace.append(h)
        stacked = torch.stack(trace, dim=0)    # (L+1) x NODE x dim
        return self.MLP_layer(h), stacked
    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels."""
        n_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        sizes = torch.zeros(self.n_classes).long().to(self.device)
        sizes[torch.unique(label)] = counts
        weight = (n_nodes - sizes).float() / n_nodes
        weight = weight * (sizes > 0).float()  # classes absent from the batch get zero weight
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/bi_gcn_net_for_Eval.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.bi_gcn_layer import biGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet_for_Eval(nn.Module):
    """Evaluation variant of ``biGCNNet`` that also exposes all intermediate
    node representations (stacked) for feature analysis.

    forward returns (logits, stacked features, soft assignment).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Stack: GCN -> biGCN (pooling) -> (n_layers-3) x GCN -> GCN
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual) ])
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Return (logits, per-layer feature stack, soft assignment)."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GCN
        cnt=0
        hs = [] # for feature analysis
        hs.append(h) # initial features
        for conv in self.layers:
            if cnt == 1:
                # layer 1 is the bi layer: also returns the soft assignment
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            hs.append(h)
            cnt+=1
        Hs = torch.stack(hs,dim=0) # L x NODE x dim
        # output
        h_out = self.MLP_layer(h)
        return h_out, Hs, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss ('mincut' or 'diffpool'); other modes return None."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # note: -0 == 0, so dim=(-0, -1) is simply dims (0, 1)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
| 4,752 | 35.007576 | 140 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/bi_graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.bi_graphsage_layer import biGraphSageLayer
from layers.mlp_readout_layer import MLPReadout
class biGraphSageNet(nn.Module):
    """
    Grahpsage network with multiple GraphSageLayer layers

    Layer index 1 is a ``biGraphSageLayer`` that additionally emits a soft
    cluster-assignment matrix ``s`` returned by :meth:`forward`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Stack: Sage -> biSage (pooling) -> (n_layers-3) x Sage -> Sage
        self.layers = nn.ModuleList([ GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, residual) ])
        self.layers.append(biGraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, self.assign_dim, self.sigma, residual))
        for _ in range(n_layers-3):
            self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                    dropout, aggregator_type, batch_norm, residual))
        self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-node class logits, soft assignment matrix)."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # graphsage
        cnt=0
        for conv in self.layers:
            if cnt == 1:
                # layer 1 is the bi layer: also returns the soft assignment
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss ('mincut' or 'diffpool'); other modes return None."""
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # note: -0 == 0, so dim=(-0, -1) is simply dims (0, 1)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
| 4,819 | 37.56 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/SBMs_node_classification/bi_gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.bi_gat_layer import biGATLayer
from layers.mlp_readout_layer import MLPReadout
class biGATNet(nn.Module):
    """GAT for SBM node classification with one bi (pooling) layer.

    Layer index 1 is a ``biGATLayer`` that additionally emits a soft
    cluster-assignment matrix ``s``; the final GAT layer uses a single head.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        self.num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # marks this model as producing a soft assignment
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        # embedding is sized hidden_dim * num_heads because GAT layers concat heads
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * self.num_heads) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Stack: GAT -> biGAT (pooling) -> (n_layers-3) x GAT -> single-head GAT
        self.layers = nn.ModuleList([ GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.residual) ])
        self.layers.append(biGATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.residual))
        self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim, 1,
                                    dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-node class logits, soft assignment matrix)."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GAT
        cnt = 0
        for conv in self.layers:
            if cnt == 1:
                # layer 1 is the bi layer: also returns the soft assignment
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        # output
        h_out = self.MLP_layer(h)
        return h_out, self.s
    def sup_loss(self, pred, label):
        """Class-weighted cross-entropy; weights offset class imbalance."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()  # zero weight for classes absent from the batch
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Auxiliary clustering loss ('mincut' or 'diffpool'); other modes return None.

        Unlike the sibling models, this version adds 1e-09 stabilisers.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse diagonal degree matrix D built from row sums of A
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                        torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            # NOTE(review): the epsilon is added to the *ratio*, not the denominator;
            # likely intended as mincut_num / (mincut_den + 1e-09) — confirm
            mincut_loss = -(mincut_num / mincut_den + 1e-09)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # NOTE(review): epsilon is added to ss itself rather than to its norm — confirm
            ortho_loss = torch.norm(
                ss / torch.norm(ss + 1e-09, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss
| 4,771 | 37.796748 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/superpixels_graph_classification/bi_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.bi_gcn_layer import biGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding -> GCNLayer -> biGCNLayer (also emits the soft
        cluster assignment) -> (L-3) x GCNLayer -> GCNLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably tells the trainer this model emits a soft assignment — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt=0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
cnt+=1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss | 4,128 | 41.132653 | 140 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/superpixels_graph_classification/bi_gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer
from layers.bi_gated_gcn_layer import biGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGatedGCNNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node/edge embeddings -> GatedGCNLayer -> biGatedGCNLayer (also
        emits the soft assignment) -> (L-3) x GatedGCNLayer ->
        GatedGCNLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGatedGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']  # if False, forward() replaces edge features with ones
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, sigma=self.sigma, residual=self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
if not self.edge_feat: # edge feature set to 1
e = torch.ones_like(e).to(self.device)
e = self.embedding_e(e)
# convnets
cnt=0
for conv in self.layers:
if cnt==1:
h, e, self.s = conv(g, h, e)
else:
h, e = conv(g, h, e)
cnt+=1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
class biGatedGCNNet_IL(nn.Module):
    def __init__(self, net_params):
        """Build the InterLeaved variant: plain and bi GatedGCN layers alternate.

        First layer is a plain GatedGCNLayer; after that, even loop indices add
        a biGatedGCNLayer (emits a soft assignment), odd indices a plain one.
        The final layer maps to `out_dim`.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGatedGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']  # if False, forward() replaces edge features with ones
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        for l in range(self.n_layers-1):
            if l == self.n_layers-2:
                # last layer of the stack: output dimension is out_dim
                if l % 2 == 0:
                    self.layers.append(biGatedGCNLayer(hidden_dim, out_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, self.residual))
                else:
                    self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout,
                                                    self.batch_norm, self.residual))
            else:
                # interior layer: keep hidden_dim
                if l % 2 == 0:
                    self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, self.residual))
                else:
                    self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
if not self.edge_feat: # edge feature set to 1
e = torch.ones_like(e).to(self.device)
e = self.embedding_e(e)
# convnets
s = []
for conv in self.layers:
try:
h, e, tmp = conv(g, h, e)
s.append(tmp)
except:
h, e = conv(g, h, e)
self.S = torch.stack(s,dim=0).to(self.device) # num_pool x sum_node x assign_dim
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.S
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
mincut_loss, ortho_loss = 0, 0
for l in range(soft_assign.shape[0]):
one_s = soft_assign[l]
out_adj = torch.mm(one_s.transpose(0,1),torch.sparse.mm(adj,one_s))
out_d = torch.mm(one_s.transpose(0,1),torch.sparse.mm(d,one_s))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss += -(mincut_num / mincut_den)
ss = torch.matmul(one_s.transpose(0, 1), one_s)
i_s = torch.eye(one_s.shape[1]).type_as(ss)
ortho_loss += torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return ( mincut_loss + ortho_loss ) / soft_assign.shape[0]
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss, linkpred_loss = 0, 0
for l in range(soft_assign.shape[0]):
one_s = soft_assign[l]
ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
linkpred_loss += torch.add( -one_s.matmul(one_s.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return (ent_loss + linkpred_loss) / soft_assign.shape[0]
class biGatedGCNNet_ALL(nn.Module):
    def __init__(self, net_params):
        """Build the ALL variant: every conv layer is a biGatedGCNLayer, so every
        layer emits a soft assignment; the last one maps to `out_dim`.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGatedGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']  # if False, forward() replaces edge features with ones
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([ biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                            self.batch_norm, self.assign_dim, self.sigma, self.residual ) for _ in range(self.n_layers-1) ])
        self.layers.append(biGatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
if not self.edge_feat: # edge feature set to 1
e = torch.ones_like(e).to(self.device)
e = self.embedding_e(e)
# convnets
s = []
for conv in self.layers:
h, e, tmp = conv(g, h, e)
s.append(tmp)
self.S = torch.stack(s,dim=0).to(self.device) # num_pool x sum_node x assign_dim
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.S
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
mincut_loss, ortho_loss = 0, 0
for l in range(self.n_layers):
one_s = soft_assign[l]
out_adj = torch.mm(one_s.transpose(0,1),torch.sparse.mm(adj,one_s))
out_d = torch.mm(one_s.transpose(0,1),torch.sparse.mm(d,one_s))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss += -(mincut_num / mincut_den)
ss = torch.matmul(one_s.transpose(0, 1), one_s)
i_s = torch.eye(one_s.shape[1]).type_as(ss)
ortho_loss += torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return ( mincut_loss + ortho_loss ) / self.n_layers
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss, linkpred_loss = 0, 0
for l in range(self.n_layers):
one_s = soft_assign[l]
ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
linkpred_loss += torch.add( -one_s.matmul(one_s.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return (ent_loss + linkpred_loss) / self.n_layers | 13,454 | 41.850318 | 149 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/superpixels_graph_classification/bi_graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.bi_graphsage_layer import biGraphSageLayer
from layers.mlp_readout_layer import MLPReadout
class biGraphSageNet(nn.Module):
"""
Grahpsage network with multiple GraphSageLayer layers
"""
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding -> GraphSageLayer -> biGraphSageLayer (also emits
        the soft assignment) -> (L-3) x GraphSageLayer ->
        GraphSageLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']  # e.g. neighbour aggregation variant
        n_layers = net_params['L']  # total number of conv layers
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.sigma = net_params['sigma']  # forwarded to biGraphSageLayer (its role is defined there)
        self.pos_enc = net_params['pos_enc']  # stored but not used in this class's visible code
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, self.assign_dim, self.sigma, residual))
        for _ in range(n_layers-3):
            self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                    dropout, aggregator_type, batch_norm, residual))
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt = 0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
cnt += 1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den + 1e-09)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss + 1e-09, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
| 4,447 | 40.962264 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/superpixels_graph_classification/bi_gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
import dgl
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.bi_gat_layer import biGATLayer
from layers.mlp_readout_layer import MLPReadout
class biGATNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding (to hidden_dim * n_heads) -> GATLayer ->
        biGATLayer (also emits the soft assignment) -> (L-3) x GATLayer ->
        single-head GATLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        self.num_heads = net_params['n_heads']  # attention heads per GAT layer
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGATLayer (its role is defined there)
        self.pos_enc = net_params['pos_enc']  # stored but not used in this class's visible code
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.embedding_h = nn.Linear(in_dim, hidden_dim * self.num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                                    dropout, self.batch_norm, self.residual))
        # final layer uses a single head and maps to out_dim
        self.layers.append(GATLayer(hidden_dim * self.num_heads, out_dim, 1,
                                                    dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt = 0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
cnt += 1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss | 4,422 | 40.726415 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TUs_graph_classification/bi_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.bi_gcn_layer import biGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding -> GCNLayer -> biGCNLayer (also emits the soft
        cluster assignment) -> (L-3) x GCNLayer -> GCNLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                              self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                               self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt=0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
cnt+=1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
| 4,137 | 40.79798 | 140 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TUs_graph_classification/bi_gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer
from layers.bi_gated_gcn_layer import biGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGatedGCNNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node/edge embeddings -> GatedGCNLayer -> biGatedGCNLayer (also
        emits the soft assignment) -> (L-3) x GatedGCNLayer ->
        GatedGCNLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.sigma = net_params['sigma']  # forwarded to biGatedGCNLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        # NOTE(review): the edge embedding reuses the node in_dim — looks like
        # TUs edge inputs share the node feature width; confirm against the loader.
        self.embedding_e = nn.Linear(in_dim, hidden_dim)
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, residual=self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual))
        self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
e = self.embedding_e(e)
# convnets
cnt = 0
for conv in self.layers:
if cnt == 1:
h, e, self.s = conv(g, h, e)
else:
h, e = conv(g, h, e)
cnt+=1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
| 4,223 | 39.228571 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TUs_graph_classification/bi_graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.bi_graphsage_layer import biGraphSageLayer
from layers.mlp_readout_layer import MLPReadout
class biGraphSageNet(nn.Module):
"""
Grahpsage network with multiple GraphSageLayer layers
"""
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding -> GraphSageLayer -> biGraphSageLayer (also emits
        the soft assignment) -> (L-3) x GraphSageLayer ->
        GraphSageLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']  # neighbour aggregation variant
        n_layers = net_params['L']  # total number of conv layers
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.sigma = net_params['sigma']  # forwarded to biGraphSageLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, self.assign_dim, self.sigma, residual))
        for _ in range(n_layers-3):
            self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                dropout, aggregator_type, batch_norm, residual))
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt = 0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
cnt += 1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den + 1e-09)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss + 1e-09, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
| 4,412 | 40.632075 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TUs_graph_classification/bi_gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import CustomGATLayer as GATLayer
from layers.bi_gat_layer import biGATLayer
from layers.mlp_readout_layer import MLPReadout
class biGATNet(nn.Module):
    def __init__(self, net_params):
        """Build the network from the `net_params` config dict.

        Stack: node embedding (to hidden_dim * n_heads) -> GATLayer ->
        biGATLayer (also emits the soft assignment) -> (L-3) x GATLayer ->
        single-head GATLayer(out_dim) -> MLP readout.
        """
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        self.num_heads = net_params['n_heads']  # attention heads per GAT layer
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']  # total number of conv layers
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.sigma = net_params['sigma']  # forwarded to biGATLayer (its role is defined there)
        self.sg_flag = True  # presumably signals soft-assignment support — confirm at call site
        self.assign_dim = net_params['assign_dim']  # number of soft clusters
        self.dropout = dropout
        self.embedding_h = nn.Linear(in_dim, hidden_dim * self.num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([ GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.residual) ])
        # second layer is the bi-layer that also produces the soft assignment
        self.layers.append(biGATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                              dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                            dropout, self.batch_norm, self.residual))
        # final layer uses a single head and maps to out_dim
        self.layers.append(GATLayer(hidden_dim * self.num_heads, out_dim, 1,
                                        dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
cnt = 0
for conv in self.layers:
if cnt == 1:
h, self.s = conv(g, h)
else:
h = conv(g, h)
h[torch.isinf(h)] = 1e+9 # clamping
cnt+=1
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
elif self.readout == "mean":
hg = dgl.mean_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg), self.s
def sup_loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
return loss
def unsup_loss(self, g, soft_assign, mode):
if mode == 'mincut':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
torch.sparse.sum(adj,dim=1).to_dense())
out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
mincut_num = torch.einsum('ii->', out_adj)
mincut_den = torch.einsum('ii->', out_d)
mincut_loss = -(mincut_num / mincut_den + 1e-09)
ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
ortho_loss = torch.norm(
ss / torch.norm(ss + 1e-09, dim=(-0, -1), keepdim=True) -
i_s / torch.norm(i_s), dim=(-0, -1))
return mincut_loss + ortho_loss
elif mode == 'diffpool':
adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
return ent_loss + linkpred_loss
| 4,427 | 41.171429 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TSP_edge_classification/bi_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.bi_gcn_layer import biGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet(nn.Module):
    """GCN edge classifier (TSP) whose second layer is a biGCNLayer that
    additionally emits a soft cluster-assignment matrix `self.s`.

    Stack: GCNLayer -> biGCNLayer -> (L-3) x GCNLayer -> GCNLayer(out_dim);
    edge logits come from an MLP over concatenated endpoint features.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" to the training loop -- confirm
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.assign_dim = net_params['assign_dim']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Layer index 1 is the bi-layer; it also returns the soft assignment.
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual) ])
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
        # 2*out_dim because each edge concatenates src and dst node features.
        self.MLP_layer = MLPReadout(2*out_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-edge class logits, soft assignment from the bi-layer)."""
        h = self.embedding_h(h.float())
        h = self.in_feat_dropout(h)
        cnt=0
        for conv in self.layers:
            if cnt == 1:
                # only the biGCNLayer (index 1) returns a second value
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.s
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser on `soft_assign` (N x assign_dim).

        'mincut': -Tr(S^T A S)/Tr(S^T D S) plus orthogonality penalty;
        'diffpool': assignment entropy plus link-prediction loss.
        Any other mode falls through and returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                            torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            # distance of normalised S^T S from the (normalised) identity
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss | 4,101 | 40.857143 | 140 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TSP_edge_classification/bi_gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer
from layers.bi_gated_gcn_layer import biGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGatedGCNNet(nn.Module):
    """Gated-GCN edge classifier (TSP) whose second layer is a
    biGatedGCNLayer that additionally emits a soft cluster-assignment
    matrix `self.s`.

    Stack: GatedGCNLayer -> biGatedGCNLayer -> (L-3) x GatedGCNLayer ->
    GatedGCNLayer(out_dim); edge logits come from an MLP over the
    concatenated endpoint features.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        self.sigma = net_params['sigma']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        # Layer index 1 is the bi-layer; it also returns the soft assignment.
        self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                   self.batch_norm, self.residual)])
        self.layers.append(biGatedGCNLayer(hidden_dim, hidden_dim, dropout, self.batch_norm,
                                           self.assign_dim, self.sigma, residual=self.residual))
        for _ in range(n_layers - 3):
            self.layers.append(GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                             self.batch_norm, self.residual))
        self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-edge class logits, soft assignment from the bi-layer).

        FIX: `h_pos_enc` was referenced without ever being defined, so any
        run with pos_enc=True raised NameError. It is now an optional
        argument (backward-compatible: 3-argument calls still work); the
        positional-encoding branch is applied only when it is supplied.
        """
        h = self.embedding_h(h.float())
        if self.pos_enc and h_pos_enc is not None:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        if not self.edge_feat:
            e = torch.ones_like(e).to(self.device)
        e = self.embedding_e(e.float())
        # convnets
        for idx, conv in enumerate(self.layers):
            if idx == 1:  # only the bi-layer returns a third value
                h, e, self.s = conv(g, h, e)
            else:
                h, e = conv(g, h, e)
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.s
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        return criterion(pred, label)
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser on `soft_assign` (N x assign_dim).

        'mincut': -Tr(S^T A S)/Tr(S^T D S) plus orthogonality penalty;
        'diffpool': assignment entropy plus link-prediction loss.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]), range(adj.size()[0])]),
                                        torch.sparse.sum(adj, dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(adj, soft_assign))
            out_d = torch.mm(soft_assign.transpose(0, 1), torch.sparse.mm(d, soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(0, 1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(0, 1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add(-soft_assign.matmul(soft_assign.transpose(0, 1)), adj).norm(dim=(0, 1)) / (adj.size(0) * adj.size(1))
            return ent_loss + linkpred_loss
class biGatedGCNNet_IL(nn.Module):
    """Gated-GCN edge classifier with Interleaved bi-Layers: every other
    layer is a biGatedGCNLayer, and the soft assignments of all bi-layers
    are stacked into `self.S` (num_pool x total_nodes x assign_dim).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        self.sigma = net_params['sigma']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        # First layer is plain; afterwards bi-layers sit at even positions
        # of the remaining stack, the last layer maps to out_dim.
        self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                   self.batch_norm, self.residual)])
        for l in range(self.n_layers - 1):
            layer_out = out_dim if l == self.n_layers - 2 else hidden_dim
            if l % 2 == 0:
                self.layers.append(biGatedGCNLayer(hidden_dim, layer_out, dropout,
                                                   self.batch_norm, self.assign_dim, self.sigma, self.residual))
            else:
                self.layers.append(GatedGCNLayer(hidden_dim, layer_out, dropout,
                                                 self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return (per-edge class logits, stacked soft assignments `self.S`).

        FIX 1: `h_pos_enc` was referenced without being defined (NameError
        whenever pos_enc=True); it is now an optional argument.
        FIX 2: the old bare try/except invoked every non-bi layer TWICE
        (the 3-way unpack raised ValueError, and the except branch re-ran
        `conv`) and silently swallowed genuine errors from any layer.
        Dispatching on the layer type runs each layer exactly once.
        """
        h = self.embedding_h(h.float())
        if self.pos_enc and h_pos_enc is not None:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        if not self.edge_feat:
            e = torch.ones_like(e).to(self.device)
        e = self.embedding_e(e.float())
        # convnets
        s = []
        for conv in self.layers:
            if isinstance(conv, biGatedGCNLayer):
                h, e, tmp = conv(g, h, e)
                s.append(tmp)
            else:
                h, e = conv(g, h, e)
        self.S = torch.stack(s, dim=0).to(self.device)  # num_pool x sum_node x assign_dim
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.S
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        return criterion(pred, label)
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser averaged over the stacked assignments.

        `soft_assign` is (num_pool x N x assign_dim); the per-pool 'mincut'
        or 'diffpool' losses are summed and divided by num_pool.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]), range(adj.size()[0])]),
                                        torch.sparse.sum(adj, dim=1).to_dense())
            mincut_loss, ortho_loss = 0, 0
            for l in range(soft_assign.shape[0]):
                one_s = soft_assign[l]
                out_adj = torch.mm(one_s.transpose(0, 1), torch.sparse.mm(adj, one_s))
                out_d = torch.mm(one_s.transpose(0, 1), torch.sparse.mm(d, one_s))
                mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
                mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
                mincut_loss += -(mincut_num / mincut_den)
                ss = torch.matmul(one_s.transpose(0, 1), one_s)
                i_s = torch.eye(one_s.shape[1]).type_as(ss)
                ortho_loss += torch.norm(
                    ss / torch.norm(ss, dim=(0, 1), keepdim=True) -
                    i_s / torch.norm(i_s), dim=(0, 1))
            return (mincut_loss + ortho_loss) / soft_assign.shape[0]
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss, linkpred_loss = 0, 0
            for l in range(soft_assign.shape[0]):
                one_s = soft_assign[l]
                ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
                linkpred_loss += torch.add(-one_s.matmul(one_s.transpose(0, 1)), adj).norm(dim=(0, 1)) / (adj.size(0) * adj.size(1))
            return (ent_loss + linkpred_loss) / soft_assign.shape[0]
class biGatedGCNNet_ALL(nn.Module):
    """Gated-GCN edge classifier in which EVERY layer is a biGatedGCNLayer;
    the soft assignments of all layers are stacked into `self.S`
    (num_pool x total_nodes x assign_dim).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        self.sigma = net_params['sigma']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        # all L layers are bi-layers; the last one maps to out_dim
        self.layers = nn.ModuleList([ biGatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.assign_dim, self.sigma, self.residual ) for _ in range(self.n_layers-1) ])
        self.layers.append(biGatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        self.MLP_layer = MLPReadout(2*out_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-edge class logits, stacked soft assignments `self.S`)."""
        h = self.embedding_h(h.float())
        # BUG(review): `h_pos_enc` is used here but is neither a parameter nor
        # assigned earlier -- NameError whenever pos_enc=True. Needs a fix.
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        if not self.edge_feat:
            e = torch.ones_like(e).to(self.device)
        e = self.embedding_e(e.float())
        # convnets
        s = []
        for conv in self.layers:
            h, e, tmp = conv(g, h, e)
            s.append(tmp)
        self.S = torch.stack(s,dim=0).to(self.device) # num_pool x sum_node x assign_dim
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.S
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser averaged over all n_layers assignments.

        'mincut': per-layer -Tr(S^T A S)/Tr(S^T D S) + orthogonality;
        'diffpool': per-layer entropy + link-prediction loss.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                            torch.sparse.sum(adj,dim=1).to_dense())
            mincut_loss, ortho_loss = 0, 0
            for l in range(self.n_layers):
                one_s = soft_assign[l]
                out_adj = torch.mm(one_s.transpose(0,1),torch.sparse.mm(adj,one_s))
                out_d = torch.mm(one_s.transpose(0,1),torch.sparse.mm(d,one_s))
                mincut_num = torch.einsum('ii->', out_adj)
                mincut_den = torch.einsum('ii->', out_d)
                mincut_loss += -(mincut_num / mincut_den)
                ss = torch.matmul(one_s.transpose(0, 1), one_s)
                i_s = torch.eye(one_s.shape[1]).type_as(ss)
                ortho_loss += torch.norm(
                    ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                    i_s / torch.norm(i_s), dim=(-0, -1))
            return ( mincut_loss + ortho_loss ) / self.n_layers
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss, linkpred_loss = 0, 0
            for l in range(self.n_layers):
                one_s = soft_assign[l]
                ent_loss += torch.distributions.Categorical(probs=one_s).entropy().mean(-1)
                linkpred_loss += torch.add( -one_s.matmul(one_s.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return (ent_loss + linkpred_loss) / self.n_layers | 14,061 | 41.741641 | 149 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TSP_edge_classification/bi_graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.bi_graphsage_layer import biGraphSageLayer
from layers.mlp_readout_layer import MLPReadout
class biGraphSageNet(nn.Module):
    """
    GraphSage edge classifier (TSP) with multiple GraphSageLayer layers;
    the second layer is a biGraphSageLayer that additionally emits a soft
    cluster-assignment matrix `self.s`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Layer index 1 is the bi-layer; it also returns the soft assignment.
        self.layers = nn.ModuleList([ GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, residual) ])
        self.layers.append(biGraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                              dropout, aggregator_type, batch_norm, self.assign_dim, self.sigma, residual))
        for _ in range(n_layers-3):
            self.layers.append(GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                  dropout, aggregator_type, batch_norm, residual))
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        # 2*out_dim because each edge concatenates src and dst node features.
        self.MLP_layer = MLPReadout(2*out_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-edge class logits, soft assignment from the bi-layer)."""
        h = self.embedding_h(h.float())
        h = self.in_feat_dropout(h)
        cnt = 0
        for conv in self.layers:
            if cnt == 1:
                # only the biGraphSageLayer (index 1) returns a second value
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.s
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser on `soft_assign` (N x assign_dim).

        'mincut': -Tr(S^T A S)/Tr(S^T D S) plus orthogonality penalty;
        'diffpool': assignment entropy plus link-prediction loss.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                            torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss | 4,353 | 40.865385 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/TSP_edge_classification/bi_gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import CustomGATLayer as GATLayer
from layers.bi_gat_layer import biGATLayer
from layers.mlp_readout_layer import MLPReadout
class biGATNet(nn.Module):
    """GAT edge classifier (TSP) whose second layer is a biGATLayer that
    additionally emits a soft cluster-assignment matrix `self.s`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        self.num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.edge_feat = net_params['edge_feat']
        self.sigma = net_params['sigma']
        self.pos_enc = net_params['pos_enc']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.assign_dim = net_params['assign_dim']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim, hidden_dim * self.num_heads)
        if self.edge_feat:
            self.embedding_e = nn.Linear(in_dim_edge, hidden_dim * self.num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Layer index 1 is the bi-layer; it also returns the soft assignment.
        self.layers = nn.ModuleList([ GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                                    dropout, self.batch_norm, self.residual) ])
        self.layers.append(biGATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                                    dropout, self.batch_norm, self.assign_dim, self.sigma, self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GATLayer(hidden_dim * self.num_heads, hidden_dim, self.num_heads,
                                                    dropout, self.batch_norm, self.residual))
        self.layers.append(GATLayer(hidden_dim * self.num_heads, out_dim, 1,
                                                    dropout, self.batch_norm, self.residual))
        # 2*out_dim because each edge concatenates src and dst node features.
        self.MLP_layer = MLPReadout(2*out_dim, n_classes)
    def forward(self, g, h, e):
        """Return (per-edge class logits, soft assignment from the bi-layer)."""
        h = self.embedding_h(h.float())
        h = self.in_feat_dropout(h)
        #if not self.edge_feat:
            #e = torch.ones_like(e).to(self.device)
        #e = self.embedding_e(e.float())
        cnt = 0
        for conv in self.layers:
            if cnt == 1:
                # only the biGATLayer (index 1) returns a second value
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            h[torch.isinf(h)] = 1e+9 # clamping
            cnt+=1
        g.ndata['h'] = h
        def _edge_feat(edges):
            # edge logits from concatenated endpoint embeddings
            e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            e = self.MLP_layer(e)
            e[torch.isinf(e)] = 1e+9 # clamping
            return {'e': e}
        g.apply_edges(_edge_feat)
        return g.edata['e'], self.s
    def sup_loss(self, pred, label):
        """Supervised cross-entropy edge-classification loss."""
        criterion = nn.CrossEntropyLoss(weight=None)
        loss = criterion(pred, label)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser on `soft_assign` (N x assign_dim).

        'mincut': -Tr(S^T A S)/Tr(S^T D S) plus orthogonality penalty;
        'diffpool': assignment entropy plus link-prediction loss.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                            torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            # NOTE(review): the epsilon is added to the ratio (constant shift)
            # and to the matrix elements below -- likely intended for the
            # denominator / the norm instead; confirm before changing.
            mincut_loss = -(mincut_num / mincut_den + 1e-09)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss + 1e-09, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss | 4,856 | 40.161017 | 135 | py |
bi-MP-HyeokjinK | bi-MP-HyeokjinK/nets/molecules_graph_regression/bi_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.bi_gcn_layer import biGCNLayer
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class biGCNNet(nn.Module):
    """GCN molecule-regression network whose second layer is a biGCNLayer
    that additionally emits a soft cluster-assignment matrix `self.s`.

    Node features are atom-type indices (nn.Embedding); the readout feeds a
    single-output MLP (regression).
    """
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.sigma = net_params['sigma']
        self.sg_flag = True  # NOTE(review): presumably signals "soft assignment available" -- confirm
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.assign_dim = net_params['assign_dim']
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
        # Layer index 1 is the bi-layer; it also returns the soft assignment.
        self.layers = nn.ModuleList([ GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual) ])
        self.layers.append(biGCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm,
                                        self.assign_dim,sigma=self.sigma, residual=self.residual))
        for _ in range(n_layers-3):
            self.layers.append(GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                                    self.batch_norm, self.residual))
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu,
                                        dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem
    def forward(self, g, h, e):
        """Return (per-graph regression value, soft assignment from the bi-layer)."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        cnt=0
        for conv in self.layers:
            if cnt == 1:
                # only the biGCNLayer (index 1) returns a second value
                h, self.s = conv(g, h)
            else:
                h = conv(g, h)
            cnt+=1
        g.ndata['h'] = h
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        elif self.readout == "mean":
            hg = dgl.mean_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')  # default readout is mean nodes
        return self.MLP_layer(hg), self.s
    def sup_loss(self, scores, targets):
        """Supervised regression loss (MAE / L1)."""
        # loss = nn.MSELoss()(scores,targets)
        loss = nn.L1Loss()(scores, targets)
        return loss
    def unsup_loss(self, g, soft_assign, mode):
        """Clustering regulariser on `soft_assign` (N x assign_dim).

        'mincut': -Tr(S^T A S)/Tr(S^T D S) plus orthogonality penalty;
        'diffpool': assignment entropy plus link-prediction loss.
        Any other mode returns None.
        """
        if mode == 'mincut':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            # sparse degree matrix D = diag(row sums of A)
            d = torch.sparse_coo_tensor(torch.tensor([range(adj.size()[0]),range(adj.size()[0])]),
                                            torch.sparse.sum(adj,dim=1).to_dense())
            out_adj = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(adj,soft_assign))
            out_d = torch.mm(soft_assign.transpose(0,1),torch.sparse.mm(d,soft_assign))
            mincut_num = torch.einsum('ii->', out_adj)  # Tr(S^T A S)
            mincut_den = torch.einsum('ii->', out_d)    # Tr(S^T D S)
            mincut_loss = -(mincut_num / mincut_den)
            ss = torch.matmul(soft_assign.transpose(0, 1), soft_assign)
            i_s = torch.eye(soft_assign.shape[1]).type_as(ss)
            ortho_loss = torch.norm(
                ss / torch.norm(ss, dim=(-0, -1), keepdim=True) -
                i_s / torch.norm(i_s), dim=(-0, -1))
            return mincut_loss + ortho_loss
        elif mode == 'diffpool':
            adj = g.adjacency_matrix(transpose=True, ctx=soft_assign.device)
            ent_loss = torch.distributions.Categorical(probs=soft_assign).entropy().mean(-1)
            linkpred_loss = torch.add( -soft_assign.matmul(soft_assign.transpose(0,1)),adj).norm(dim=(0,1)) / (adj.size(0)*adj.size(1))
            return ent_loss + linkpred_loss | 4,313 | 41.294118 | 135 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.