Dataset columns: INSTRUCTION (string, lengths 1 to 46.3k) · RESPONSE (string, lengths 75 to 80.2k)
Writes gradient statistics for critic to Tensorboard.
def _write_critic_model_stats(self, iteration:int)->None:
    "Writes gradient statistics for critic to Tensorboard."
    critic = self.learn.gan_trainer.critic
    self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats')
    self.crit_stats_updated = True
Writes gradient statistics to Tensorboard.
def _write_model_stats(self, iteration:int)->None:
    "Writes gradient statistics to Tensorboard."
    # We don't want to write stats when model is not iterated on and hence has zeroed out gradients
    gen_mode = self.learn.gan_trainer.gen_mode
    if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration)
    ...
Writes training loss to Tensorboard.
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
    "Writes training loss to Tensorboard."
    recorder = self.learn.gan_trainer.recorder
    if len(recorder.losses) == 0: return
    scalar_value = to_np((recorder.losses[-1:])[0])
    tag = self.metrics_root + 'train_loss'
    ...
Writes model generated, original and real images to Tensorboard.
def _write_images(self, iteration:int)->None: "Writes model generated, original and real images to Tensorboard." trainer = self.learn.gan_trainer #TODO: Switching gen_mode temporarily seems a bit hacky here. Certainly not a good side-effect. Is there a better way? gen_mode = trainer.g...
Callback function that writes batch end appropriate data to Tensorboard.
def on_batch_end(self, iteration:int, **kwargs)->None:
    "Callback function that writes batch end appropriate data to Tensorboard."
    super().on_batch_end(iteration=iteration, **kwargs)
    if iteration == 0: return
    if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
Callback function that writes backward end appropriate data to Tensorboard.
def on_backward_end(self, iteration:int, **kwargs)->None:
    "Callback function that writes backward end appropriate data to Tensorboard."
    if iteration == 0: return
    self._update_batches_if_needed()
    #TODO: This could perhaps be implemented as queues of requests instead but that seemed like ...
Writes model generated, original and real images to Tensorboard
def _write_images(self, iteration:int)->None: "Writes model generated, original and real images to Tensorboard" self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration, tbwriter=self.tbwriter)
Queues up an asynchronous write request to Tensorboard.
def request_write(self, request: TBWriteRequest)->None:
    "Queues up an asynchronous write request to Tensorboard."
    if self.stop_request.isSet(): return
    self.queue.put(request)
Processes queued up write requests asynchronously to Tensorboard.
def _queue_processor(self)->None:
    "Processes queued up write requests asynchronously to Tensorboard."
    while not self.stop_request.isSet():
        while not self.queue.empty():
            if self.stop_request.isSet(): return
            request = self.queue.get()
            request.write()
        ...
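These three methods form a standard producer/consumer pattern. A self-contained sketch of the same idea using only the standard library (the AsyncWriter class and its write()-bearing request objects are hypothetical, not fastai's API):

import queue, threading

class AsyncWriter:
    "Background thread that drains a queue of write requests (hypothetical minimal version)."
    def __init__(self):
        self.queue = queue.Queue()
        self.stop_request = threading.Event()
        self.thread = threading.Thread(target=self._queue_processor, daemon=True)
        self.thread.start()

    def request_write(self, request):
        if self.stop_request.is_set(): return
        self.queue.put(request)

    def _queue_processor(self):
        while not self.stop_request.is_set():
            try:
                request = self.queue.get(timeout=0.2)  # block briefly instead of busy-waiting
            except queue.Empty:
                continue
            request.write()  # each request knows how to serialize itself to TensorBoard

    def close(self):
        self.stop_request.set()
        self.thread.join()

Using get(timeout=...) instead of polling queue.empty() also avoids the race between the emptiness check and the get() call in the original loop.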
Factory method to convert a batch of model images to a list of ModelImageSet.
def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]:
    "Factory method to convert a batch of model images to a list of ModelImageSet."
    image_sets = []
    x,y = batch[0],batch[1]
    preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True)
    for ori...
Writes single model histogram to Tensorboard.
def _write_histogram(self, param_name:str, values)->None:
    "Writes single model histogram to Tensorboard."
    tag = self.name + '/weights/' + param_name
    self.tbwriter.add_histogram(tag=tag, values=values, global_step=self.iteration)
Writes model histograms to Tensorboard.
def write(self)->None: "Writes model histograms to Tensorboard." for param_name, values in self.params: self._write_histogram(param_name=param_name, values=values)
Writes model histograms to Tensorboard.
def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model')->None:
    "Writes model histograms to Tensorboard."
    request = HistogramTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
    asyncTBWriter.request_write(request)
Writes a single scalar value for a gradient statistic to Tensorboard.
def _add_gradient_scalar(self, name:str, scalar_value)->None:
    "Writes a single scalar value for a gradient statistic to Tensorboard."
    tag = self.name + '/gradients/' + name
    self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration)
Writes the average norm of the gradients to Tensorboard.
def _write_avg_norm(self, norms:[])->None:
    "Writes the average norm of the gradients to Tensorboard."
    avg_norm = sum(norms)/len(self.gradients)
    self._add_gradient_scalar('avg_norm', scalar_value=avg_norm)
Writes the median norm of the gradients to Tensorboard.
def _write_median_norm(self, norms:[])->None:
    "Writes the median norm of the gradients to Tensorboard."
    median_norm = statistics.median(norms)
    self._add_gradient_scalar('median_norm', scalar_value=median_norm)
Writes the maximum norm of the gradients to Tensorboard.
def _write_max_norm(self, norms:[])->None:
    "Writes the maximum norm of the gradients to Tensorboard."
    max_norm = max(norms)
    self._add_gradient_scalar('max_norm', scalar_value=max_norm)
Writes the minimum norm of the gradients to Tensorboard.
def _write_min_norm(self, norms:[])->None:
    "Writes the minimum norm of the gradients to Tensorboard."
    min_norm = min(norms)
    self._add_gradient_scalar('min_norm', scalar_value=min_norm)
Writes the number of zeroes in the gradients to Tensorboard.
def _write_num_zeros(self)->None:
    "Writes the number of zeroes in the gradients to Tensorboard."
    gradient_nps = [to_np(x.data) for x in self.gradients]
    num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps)
    self._add_gradient_scalar('num_zeros', scalar_value=num_zeros)
Writes the average of the gradients to Tensorboard.
def _write_avg_gradient(self)->None:
    "Writes the average of the gradients to Tensorboard."
    avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
    self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
Writes the median of the gradients to Tensorboard.
def _write_median_gradient(self)->None:
    "Writes the median of the gradients to Tensorboard."
    median_gradient = statistics.median(x.data.median() for x in self.gradients)
    self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
Writes the maximum of the gradients to Tensorboard.
def _write_max_gradient(self)->None:
    "Writes the maximum of the gradients to Tensorboard."
    max_gradient = max(x.data.max() for x in self.gradients)
    self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
Writes the minimum of the gradients to Tensorboard.
def _write_min_gradient(self)->None:
    "Writes the minimum of the gradients to Tensorboard."
    min_gradient = min(x.data.min() for x in self.gradients)
    self._add_gradient_scalar('min_gradient', scalar_value=min_gradient)
Writes model gradient statistics to Tensorboard.
def write(self)->None: "Writes model gradient statistics to Tensorboard." if len(self.gradients) == 0: return norms = [x.data.norm() for x in self.gradients] self._write_avg_norm(norms=norms) self._write_median_norm(norms=norms) self._write_max_norm(norms=norms) s...
Writes list of images as tensors to Tensorboard.
def _write_images(self, name:str, images:[Tensor])->None:
    "Writes list of images as tensors to Tensorboard."
    tag = self.ds_type.name + ' ' + name
    self.tbwriter.add_image(tag=tag, img_tensor=vutils.make_grid(images, normalize=True), global_step=self.iteration)
Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images.
def _get_image_tensors(self)->([Tensor], [Tensor], [Tensor]):
    "Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images."
    orig_images, gen_images, real_images = [], [], []
    for image_set in self.image_sets:
        orig_images.append(im...
Writes original, generated and real(target) images to Tensorboard.
def write(self)->None: "Writes original, generated and real(target) images to Tensorboard." orig_images, gen_images, real_images = self._get_image_tensors() self._write_images(name='orig images', images=orig_images) self._write_images(name='gen images', images=gen_images) self._...
Writes training and validation batch images to Tensorboard.
def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
    "Writes training and validation batch images to Tensorboard."
    self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid)
    self._write_for_dstype(learn=learn, batch=trn_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Train)
Writes batch images of specified DatasetType to Tensorboard.
def _write_for_dstype(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType)->None:
    "Writes batch images of specified DatasetType to Tensorboard."
    request = ImageTBRequest(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=ds_type)
    asyncTBWriter.request_write(request)
Writes single model graph to Tensorboard.
def write(self)->None: "Writes single model graph to Tensorboard." self.tbwriter.add_graph(model=self.model, input_to_model=self.input_to_model)
Writes model graph to Tensorboard.
def write(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor)->None:
    "Writes model graph to Tensorboard."
    request = GraphTBRequest(model=model, tbwriter=tbwriter, input_to_model=input_to_model)
    asyncTBWriter.request_write(request)
During training, batch norm layers keep track of a running mean and variance of the previous layer's activations. Because the parameters of the SWA model are computed as the average of other models' parameters, the SWA model never sees the training data itself, and therefore has no opportunity to comput...
def fix_batchnorm(swa_model, train_dl):
    """
    During training, batch norm layers keep track of a running mean and variance of the previous
    layer's activations. Because the parameters of the SWA model are computed as the average of
    other models' parameters, the SWA model never sees the training data it...
    """
Wraps h in new Variables, to detach them from their history.
def repackage_var(h):
    """Wraps h in new Variables, to detach them from their history."""
    if IS_TORCH_04: return h.detach() if type(h) == torch.Tensor else tuple(repackage_var(v) for v in h)
    else: return Variable(h.data) if type(h) == Variable else tuple(repackage_var(v) for v in h)
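A sketch of where this is used: in truncated backpropagation through time, the hidden state is detached at every segment boundary so the autograd graph does not grow across batches (modern PyTorch shown; the loop is illustrative, not fastai code):

import torch

def detach_hidden(h):
    "Detach hidden state from its history (modern equivalent of repackage_var)."
    return h.detach() if isinstance(h, torch.Tensor) else tuple(detach_hidden(v) for v in h)

# hypothetical truncated-BPTT loop:
# hidden = model.init_hidden(batch_size)
# for x, y in batches:
#     hidden = detach_hidden(hidden)   # cut the graph at the segment boundary
#     out, hidden = model(x, hidden)
#     loss = criterion(out, y)
#     loss.backward()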
Returns a SequentialRNN model. A RNN_Encoder layer is instantiated using the parameters provided. This is followed by the creation of a LinearDecoder layer. Also by default (i.e. tie_weights = True), the embedding matrix used in the RNN_Encoder is used to instantiate the weights for the LinearDecode...
def get_language_model(n_tok, emb_sz, n_hid, n_layers, pad_token, dropout=0.4, dropouth=0.3, dropouti=0.5,
                       dropoute=0.1, wdrop=0.5, tie_weights=True, qrnn=False, bias=False):
    """Returns a SequentialRNN model. A RNN_Encoder layer is instantiated using the parameters provided. This is follo...
Invoked during the forward propagation of the RNN_Encoder module. Args: input (Tensor): input of shape (sentence length x batch_size) Returns: raw_outputs (tuple(list (Tensor), list(Tensor)): list of tensors evaluated from each RNN layer without using dropouth, list ...
def forward(self, input):
    """
    Invoked during the forward propagation of the RNN_Encoder module.

    Args:
        input (Tensor): input of shape (sentence length x batch_size)

    Returns:
        raw_outputs (tuple(list (Tensor), list(Tensor)): list of tensors evaluated from each RNN layer wit...
Replace repetitions at the character level in `t`.
def replace_rep(t:str) -> str:
    "Replace repetitions at the character level in `t`."
    def _replace_rep(m:Collection[str]) -> str:
        c,cc = m.groups()
        return f' {TK_REP} {len(cc)+1} {c} '
    re_rep = re.compile(r'(\S)(\1{3,})')
    return re_rep.sub(_replace_rep, t)
Replace word repetitions in `t`.
def replace_wrep(t:str) -> str:
    "Replace word repetitions in `t`."
    def _replace_wrep(m:Collection[str]) -> str:
        c,cc = m.groups()
        return f' {TK_WREP} {len(cc.split())+1} {c} '
    re_wrep = re.compile(r'(\b\w+\W+)(\1{3,})')
    return re_wrep.sub(_replace_wrep, t)
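Both rules follow the same shape: the regex captures a unit (one character, or one word plus its separator) followed by at least three copies of itself, and the replacement emits a special token, the repeat count, and a single copy. A runnable demonstration with placeholder tokens standing in for fastai's TK_REP/TK_WREP constants:

import re

TK_REP, TK_WREP = 'xxrep', 'xxwrep'   # placeholder special tokens

def replace_rep(t):
    def _replace_rep(m):
        c, cc = m.groups()
        return f' {TK_REP} {len(cc)+1} {c} '
    return re.sub(r'(\S)(\1{3,})', _replace_rep, t)

def replace_wrep(t):
    def _replace_wrep(m):
        c, cc = m.groups()
        return f' {TK_WREP} {len(cc.split())+1} {c} '
    return re.sub(r'(\b\w+\W+)(\1{3,})', _replace_wrep, t)

print(replace_rep('soooooo good'))            # 's xxrep 6 o  good'
print(replace_wrep('yes yes yes yes maybe'))  # ' xxwrep 4 yes  maybe'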
Apply a list of replacements to fix HTML artifacts in `x`.
def fix_html(x:str) -> str:
    "Apply a list of replacements to fix HTML artifacts in `x`."
    re1 = re.compile(r' +')
    x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
        '#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(...
Replace tokens in ALL CAPS in `x` by their lower version and add `TK_UP` before.
def replace_all_caps(x:Collection[str]) -> Collection[str]:
    "Replace tokens in ALL CAPS in `x` by their lower version and add `TK_UP` before."
    res = []
    for t in x:
        if t.isupper() and len(t) > 1: res.append(TK_UP); res.append(t.lower())
        else: res.append(t)
    return res
Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before.
def deal_caps(x:Collection[str]) -> Collection[str]:
    "Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before."
    res = []
    for t in x:
        if t == '': continue
        if t[0].isupper() and len(t) > 1 and t[1:].islower(): res.append(TK_MAJ)
        res.append(t.lower())
    return res
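These rules let the model share one embedding between 'WORD', 'Word' and 'word' while keeping a signal that the casing existed. A quick demonstration of the all-caps rule with a placeholder token (TK_UP stands in for fastai's constant); note that single-character tokens such as 'I' are deliberately left untouched:

TK_UP = 'xxup'   # placeholder special token

def replace_all_caps(tokens):
    res = []
    for t in tokens:
        if t.isupper() and len(t) > 1: res += [TK_UP, t.lower()]
        else: res.append(t)
    return res

print(replace_all_caps(['I', 'AM', 'SHOUTING', 'now']))
# ['I', 'xxup', 'am', 'xxup', 'shouting', 'now']  -- 'I' is kept because len(t) > 1 fails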
Process one text `t` with tokenizer `tok`.
def process_text(self, t:str, tok:BaseTokenizer) -> List[str]:
    "Process one text `t` with tokenizer `tok`."
    for rule in self.pre_rules: t = rule(t)
    toks = tok.tokenizer(t)
    for rule in self.post_rules: toks = rule(toks)
    return toks
Process a list of `texts` in one process.
def _process_all_1(self, texts:Collection[str]) -> List[List[str]]:
    "Process a list of `texts` in one process."
    tok = self.tok_func(self.lang)
    if self.special_cases: tok.add_special_cases(self.special_cases)
    return [self.process_text(str(t), tok) for t in texts]
Process a list of `texts`.
def process_all(self, texts:Collection[str]) -> List[List[str]]:
    "Process a list of `texts`."
    if self.n_cpus <= 1: return self._process_all_1(texts)
    with ProcessPoolExecutor(self.n_cpus) as e:
        return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
Convert a list of tokens `t` to their ids.
def numericalize(self, t:Collection[str]) -> List[int]:
    "Convert a list of tokens `t` to their ids."
    return [self.stoi[w] for w in t]
Convert a list of `nums` to their tokens.
def textify(self, nums:Collection[int], sep=' ') -> List[str]:
    "Convert a list of `nums` to their tokens."
    return sep.join([self.itos[i] for i in nums]) if sep is not None else [self.itos[i] for i in nums]
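Given a fixed itos list, numericalize and textify are inverses up to out-of-vocabulary tokens. A minimal standalone sketch, assuming the real class's defaultdict-style stoi that maps unknown tokens to index 0:

import collections

class MiniVocab:
    "Minimal id<->token mapping sketch; unknown tokens map to index 0."
    def __init__(self, itos):
        self.itos = itos
        self.stoi = collections.defaultdict(int, {v: k for k, v in enumerate(itos)})

    def numericalize(self, tokens): return [self.stoi[t] for t in tokens]
    def textify(self, nums, sep=' '):  return sep.join(self.itos[i] for i in nums)

v = MiniVocab(['xxunk', 'the', 'cat', 'sat'])
ids = v.numericalize(['the', 'cat', 'flew'])   # 'flew' is out-of-vocab -> 0
print(ids, '->', v.textify(ids))               # [1, 2, 0] -> 'the cat xxunk'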
Create a vocabulary from a set of `tokens`.
def create(cls, tokens:Tokens, max_vocab:int, min_freq:int) -> 'Vocab':
    "Create a vocabulary from a set of `tokens`."
    freq = Counter(p for o in tokens for p in o)
    itos = [o for o,c in freq.most_common(max_vocab) if c >= min_freq]
    for o in reversed(defaults.text_spec_tok):
        if ...
Load the `Vocab` contained in `path`
def load(cls, path):
    "Load the `Vocab` contained in `path`"
    itos = pickle.load(open(path, 'rb'))
    return cls(itos)
Plots loss as a function of iterations. When used in a Jupyter notebook, the plot is displayed inline; otherwise it is rendered to the console and both the plot and the losses are saved to save_path.
def plot_loss(self, n_skip=10, n_skip_end=5):
    '''
    Plots loss as a function of iterations. When used in a Jupyter notebook, the plot is displayed
    inline; otherwise it is rendered to the console and both the plot and the losses are saved to save_path.
    '''
    if not in_ipynb():
        plt.switch_backend('agg')
    ...
Plots learning rate in a Jupyter notebook or console, depending on the environment of the learner.
def plot_lr(self):
    '''Plots learning rate in a Jupyter notebook or console, depending on the environment of the learner.'''
    if not in_ipynb(): plt.switch_backend('agg')
    if self.record_mom:
        fig, axs = plt.subplots(1,2,figsize=(12,4))
        for i in range(0,2):
            axs[i].se...
Plots the loss function with respect to learning rate, in log scale.
def plot(self, n_skip=10, n_skip_end=5):
    '''Plots the loss function with respect to learning rate, in log scale.'''
    plt.ylabel("validation loss")
    plt.xlabel("learning rate (log scale)")
    plt.plot(self.lrs[n_skip:-(n_skip_end+1)], self.losses[n_skip:-(n_skip_end+1)])
    plt.xscale('log')
Plots the learning rate/momentum schedule.
def plot_lr(self, show_text=True, show_moms=True):
    """Plots the learning rate/momentum schedule."""
    phase_limits = [0]
    for nb_batch, phase in zip(self.nb_batches, self.phases):
        phase_limits.append(phase_limits[-1] + nb_batch * phase.epochs)
    if not in_ipynb():
        plt.switch_backend('agg')
    ...
Distributed training of Imagenette.
def main( gpu:Param("GPU to run on", str)=None, woof: Param("Use imagewoof (otherwise imagenette)", int)=0, lr: Param("Learning rate", float)=1e-3, size: Param("Size (px: 128,192,224)", int)=128, alpha: Param("Alpha", float)=0.99, mom: Param("Momentum", float)=0.9, ...
Test if `last_loss` is NaN and interrupt training.
def on_batch_end(self, last_loss, epoch, num_batch, **kwargs:Any)->None:
    "Test if `last_loss` is NaN and interrupt training."
    if self.stop: return True  #to skip validation after stopping during training
    if torch.isnan(last_loss):
        print(f'Epoch/Batch ({epoch}/{num_batch}): Invalid l...
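The same guard in a bare training loop is a one-line check; a sketch (the loop structure is illustrative, not fastai's fit loop):

import torch

def fit(batches, model, criterion, opt):
    for i, (x, y) in enumerate(batches):
        loss = criterion(model(x), y)
        if torch.isnan(loss):                      # same test the callback performs
            print(f'Batch {i}: invalid loss, terminating training')
            return
        opt.zero_grad(); loss.backward(); opt.step()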
Initializes the best value.
def on_train_begin(self, **kwargs:Any)->None: "Initializes the best value." self.best = float('inf') if self.operator == np.less else -float('inf')
Pick the monitored value.
def get_monitor_value(self):
    "Pick the monitored value."
    if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None
    elif len(self.learn.recorder.val_losses) == 0: return None
    values = {'train_loss':self.learn.recorder.losses[-1].cpu().numpy(),
              'va...
Compare the value monitored to its best score and maybe save the model.
def on_epoch_end(self, epoch:int, **kwargs:Any)->None:
    "Compare the value monitored to its best score and maybe save the model."
    if self.every=="epoch": self.learn.save(f'{self.name}_{epoch}')
    else: #every="improvement"
        current = self.get_monitor_value()
        if current is not...
Load the best model.
def on_train_end(self, **kwargs):
    "Load the best model."
    if self.every=="improvement" and (self.learn.path/f'{self.learn.model_dir}/{self.name}.pth').is_file():
        self.learn.load(f'{self.name}', purge=False)
Initialize inner arguments.
def on_train_begin(self, **kwargs:Any)->None: "Initialize inner arguments." self.wait, self.opt = 0, self.learn.opt super().on_train_begin(**kwargs)
Compare the value monitored to its best and maybe reduce lr.
def on_epoch_end(self, epoch, **kwargs:Any)->None:
    "Compare the value monitored to its best and maybe reduce lr."
    current = self.get_monitor_value()
    if current is None: return
    if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0
    else:
        ...
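Plain PyTorch ships the same wait/patience bookkeeping as torch.optim.lr_scheduler.ReduceLROnPlateau; a sketch of equivalent usage (hyperparameters and the constant validation loss are illustrative):

import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = torch.nn.Linear(10, 1)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
# factor and patience play the same role as the callback's fields
sched = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=2)

for epoch in range(10):
    val_loss = 1.0            # stand-in for a real validation loss that has plateaued
    sched.step(val_loss)      # reduces lr once the plateau outlasts `patience`

print(opt.param_groups[0]['lr'])  # lr has been multiplied by `factor` at each reduction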
Convert a notebook `fname` to an html file in `dest_path`.
def convert_nb(fname, dest_path='.'):
    "Convert a notebook `fname` to an html file in `dest_path`."
    from .gen_notebooks import remove_undoc_cells, remove_code_cell_jupyter_widget_state_elem
    nb = read_nb(fname)
    nb['cells'] = remove_undoc_cells(nb['cells'])
    nb['cells'] = remove_code_cell_jupyter_widget_st...
Convert modified notebooks in `folder` to html pages in `dest_path`.
def convert_all(folder, dest_path='.', force_all=False):
    "Convert modified notebooks in `folder` to html pages in `dest_path`."
    path = Path(folder)
    changed_cnt = 0
    for fname in path.glob("*.ipynb"):
        # only rebuild modified files
        fname_out = Path(dest_path)/fname.with_suffix('.html').nam...
Function that collects samples and adds padding. Flips token order if needed.
def pad_collate(samples:BatchSamples, pad_idx:int=1, pad_first:bool=True, backwards:bool=False) -> Tuple[LongTensor, LongTensor]:
    "Function that collects samples and adds padding. Flips token order if needed."
    samples = to_data(samples)
    max_len = max([len(s[0]) for s in samples])
    res = torch.zeros(len(samples), max_len).long() + pad_idx
    ...
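A self-contained version of the same padding logic, assuming samples are (token_ids, label) pairs of plain Python lists. With pad_first=True the padding goes before the tokens, so every sequence ends at the same position, which suits classifiers that read the last hidden state:

import torch

def pad_collate(samples, pad_idx=1, pad_first=True, backwards=False):
    "Pad variable-length token sequences into a (batch, max_len) LongTensor."
    max_len = max(len(s[0]) for s in samples)
    res = torch.zeros(len(samples), max_len).long() + pad_idx
    for i, (toks, _) in enumerate(samples):
        t = torch.tensor(toks[::-1] if backwards else toks)
        if pad_first: res[i, -len(toks):] = t
        else:         res[i, :len(toks)]  = t
    return res, torch.tensor([s[1] for s in samples])

xb, yb = pad_collate([([2, 3, 4], 0), ([5, 6], 1)])
print(xb)  # tensor([[2, 3, 4], [1, 5, 6]]) -- second row is padded at the front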
Read the text in `fn`.
def open_text(fn:PathOrStr, enc='utf-8'):
    "Read the text in `fn`."
    with open(fn,'r', encoding=enc) as f: return ''.join(f.readlines())
Create the ragged array that will be filled when we ask for items.
def allocate_buffers(self):
    "Create the ragged array that will be filled when we ask for items."
    if self.ite_len is None: len(self)
    self.idx = LanguageModelPreLoader.CircularIndex(len(self.dataset.x.items), not self.backwards)
    self.batch = np.zeros((self.bs, self.bptt+1), dtype=np.int64)
    ...
Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented
def fill_row(self, forward, items, idx, row, ro, ri, overlap, lengths):
    "Fill the row with tokens from the ragged array. --OBS-- overlap != 1 has not been implemented"
    ibuf = n = 0
    ro -= 1
    while ibuf < row.size:
        ro += 1
        ix = idx[ro]
        rag = items[ix]
        ...
Create a `TextDataBunch` from ids, labels and a `vocab`. `kwargs` are passed to the dataloader creation.
def from_ids(cls, path:PathOrStr, vocab:Vocab, train_ids:Collection[Collection[int]], valid_ids:Collection[Collection[int]],
             test_ids:Collection[Collection[int]]=None, train_lbls:Collection[Union[int,float]]=None,
             valid_lbls:Collection[Union[int,float]]=None, classes:Collection[Any]=No...
Load a `TextDataBunch` from `path/cache_name`. `kwargs` are passed to the dataloader creation.
def load(cls, path:PathOrStr, cache_name:PathOrStr='tmp', processor:PreProcessor=None, **kwargs):
    "Load a `TextDataBunch` from `path/cache_name`. `kwargs` are passed to the dataloader creation."
    warn("""This method is deprecated and only kept to load data serialized in v1.0.43 or earlier.
         ...
Create a `TextDataBunch` from tokens and labels. `kwargs` are passed to the dataloader creation.
def from_tokens(cls, path:PathOrStr, trn_tok:Collection[Collection[str]], trn_lbls:Collection[Union[int,float]],
                val_tok:Collection[Collection[str]], val_lbls:Collection[Union[int,float]], vocab:Vocab=None,
                tst_tok:Collection[Collection[str]]=None, classes:Collection[Any]=None, max_voc...
Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation.
def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None,
            tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, text_cols:IntsOrStrs=1,
            label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=6...
Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation.
def from_csv(cls, path:PathOrStr, csv_name, valid_pct:float=0.2, test:Optional[str]=None,
             tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, delimiter:str=None,
             header='infer', text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, label_delim:str=None,
             ...
Create a `TextDataBunch` from text files in folders.
def from_folder(cls, path:PathOrStr, train:str='train', valid:str='valid', test:Optional[str]=None,
                classes:Collection[Any]=None, tokenizer:Tokenizer=None, vocab:Vocab=None, chunksize:int=10000,
                max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, i...
Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None,
           num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,
           dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> ...
Function that transforms the `datasets` in a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()`.
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=32, val_bs:int=None, pad_idx=1,
           pad_first=True, device:torch.device=None, no_check:bool=False, backwards:bool=False,
           **dl_kwargs) -> DataBunch:
    "Function that transforms the `datasets` in a `DataBunch` for classification....
A special labelling method for language models.
def label_for_lm(self, **kwargs):
    "A special labelling method for language models."
    self.__class__ = LMTextList
    kwargs['label_cls'] = LMLabelList
    return self.label_const(0, **kwargs)
Get the list of files in `path` that have a text suffix. `recurse` determines if we search subfolders.
def from_folder(cls, path:PathOrStr='.', extensions:Collection[str]=text_extensions, vocab:Vocab=None,
                processor:PreProcessor=None, **kwargs)->'TextList':
    "Get the list of files in `path` that have a text suffix. `recurse` determines if we search subfolders."
    processor = ifnone(proce...
Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed.
def show_xys(self, xs, ys, max_len:int=70)->None:
    "Show the `xs` (inputs) and `ys` (targets). `max_len` is the maximum number of tokens displayed."
    from IPython.display import display, HTML
    names = ['idx','text'] if self._is_lm else ['text','target']
    items = []
    for i, (x,y) in en...
r"""InceptionV4 model architecture from the `"Inception-v4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
def inceptionv4(pretrained=True):
    r"""InceptionV4 model architecture from the
    `"Inception-v4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = InceptionV4()
    if pretrained: model...
This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions fro...
def predict_array(self, arr):
    """
    This over-ride is necessary because otherwise the learner method accesses the wrong model when it is
    called with precompute set to true

    Args:
        arr: a numpy array to be used as input to the model for prediction purposes

    Returns:
        ...
Distributed training of CIFAR-10. Fastest speed is if you run as follows: python -m fastai.launch train_cifar.py
def main( gpu:Param("GPU to run on", str)=None ):
    """Distributed training of CIFAR-10. Fastest speed is if you run as follows:
    python -m fastai.launch train_cifar.py"""
    gpu = setup_distrib(gpu)
    n_gpus = num_distrib()
    path = url2path(URLs.CIFAR)
    ds_tfms = ([*rand_pad(4, 32), flip_lr(p=0.5...
Set default values for options.
def initialize_options(self):
    """Set default values for options."""
    self.dep_groups = ''
    self.dep_quote = False
    self.dep_conda = False
Run command.
def run(self):
    """Run command."""
    wanted_groups = self.parse()
    deps = []
    invalid_groups = []
    for grp in wanted_groups:
        if grp in dep_groups: deps.extend(dep_groups[grp])
        else: invalid_groups.append(grp)
    if invalid_groups or not wanted_groups:
        ...
Return the saved feature indexes that will be concatenated Inputs: sfs (list): saved features by hook function, in other words intermediate activations last (bool): whether to concatenate only last different activation, or all from the encoder model
def get_sfs_idxs(sfs, last=True):
    """
    Return the saved feature indexes that will be concatenated

    Inputs:
        sfs (list): saved features by hook function, in other words intermediate activations
        last (bool): whether to concatenate only last different activation, or all from the encoder model
    ...
Return a `Hook` that stores activations of `module` in `self.stored`
def hook_output(module:nn.Module, detach:bool=True, grad:bool=False)->Hook:
    "Return a `Hook` that stores activations of `module` in `self.stored`"
    return Hook(module, _hook_inner, detach=detach, is_forward=not grad)
Return `Hooks` that store activations of all `modules` in `self.stored`
def hook_outputs(modules:Collection[nn.Module], detach:bool=True, grad:bool=False)->Hooks:
    "Return `Hooks` that store activations of all `modules` in `self.stored`"
    return Hooks(modules, _hook_inner, detach=detach, is_forward=not grad)
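Underneath, Hook/Hooks wrap plain PyTorch forward hooks. A minimal sketch of capturing intermediate activations without fastai (the module and names are illustrative):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 16, 3))
stored = {}

def make_hook(name):
    def hook(module, inputs, output):
        stored[name] = output.detach()   # detach so stored activations don't hold the graph
    return hook

handles = [m.register_forward_hook(make_hook(i)) for i, m in enumerate(model)]
_ = model(torch.randn(1, 3, 64, 64))
print({k: tuple(v.shape) for k, v in stored.items()})
for h in handles: h.remove()   # always remove hooks when done, as Hook.remove() does above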
Create a dummy batch to go through `m` with `size`.
def dummy_batch(m: nn.Module, size:tuple=(64,64))->Tensor:
    "Create a dummy batch to go through `m` with `size`."
    ch_in = in_channels(m)
    return one_param(m).new(1, ch_in, *size).requires_grad_(False).uniform_(-1.,1.)
Pass a `dummy_batch` in evaluation mode in `m` with `size`.
def dummy_eval(m:nn.Module, size:tuple=(64,64)):
    "Pass a `dummy_batch` in evaluation mode in `m` with `size`."
    return m.eval()(dummy_batch(m, size))
Pass a dummy input through the model `m` to get the various sizes of activations.
def model_sizes(m:nn.Module, size:tuple=(64,64))->Tuple[Sizes,Tensor,Hooks]:
    "Pass a dummy input through the model `m` to get the various sizes of activations."
    with hook_outputs(m) as hooks:
        x = dummy_eval(m, size)
        return [o.stored.shape for o in hooks]
Return the number of output features for `m`.
def num_features_model(m:nn.Module)->int:
    "Return the number of output features for `m`."
    sz = 64
    while True:
        try: return model_sizes(m, size=(sz,sz))[-1][1]
        except Exception as e:
            sz *= 2
            if sz > 2048: raise
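The dummy-batch trick generalizes beyond fastai: push one fake input through the model in eval mode and read the channel count off the output shape, doubling the spatial size whenever an architecture rejects small inputs. A standalone sketch (names illustrative):

import torch
import torch.nn as nn

def num_output_features(m: nn.Module, ch_in=3, sz=64):
    "Probe the channel count of `m`'s final activation with a dummy batch, doubling size on failure."
    while True:
        try:
            x = torch.zeros(1, ch_in, sz, sz)
            with torch.no_grad():
                return m.eval()(x).shape[1]
        except RuntimeError:
            sz *= 2               # some models need larger inputs than 64x64
            if sz > 2048: raise

body = nn.Sequential(nn.Conv2d(3, 32, 3, stride=2), nn.ReLU(), nn.Conv2d(32, 128, 3, stride=2))
print(num_output_features(body))  # 128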
Pass a dummy input through the model to get the various sizes. Returns (res,x,hooks) if `full`
def params_size(m: Union[nn.Module,Learner], size: tuple = (3, 64, 64))->Tuple[Sizes, Tensor, Hooks]:
    "Pass a dummy input through the model to get the various sizes. Returns (res,x,hooks) if `full`"
    if isinstance(m, Learner):
        if m.data.is_empty:
            raise Exception("This is an empty `Learner` an...
Print a summary of `m` using an output text width of `n` chars.
def model_summary(m:Learner, n:int=70):
    "Print a summary of `m` using an output text width of `n` chars."
    info = layers_info(m)
    header = ["Layer (type)", "Output Shape", "Param #", "Trainable"]
    res = "=" * n + "\n"
    res += f"{header[0]:<20} {header[1]:<20} {header[2]:<10} {header[3]:<10}\n"
    res += ...
Applies `hook_func` to `module`, `input`, `output`.
def hook_fn(self, module:nn.Module, input:Tensors, output:Tensors):
    "Applies `hook_func` to `module`, `input`, `output`."
    if self.detach:
        input  = (o.detach() for o in input ) if is_listy(input ) else input.detach()
        output = (o.detach() for o in output) if is_listy(output) else output.detach()
    ...
Remove the hook from the model.
def remove(self):
    "Remove the hook from the model."
    if not self.removed:
        self.hook.remove()
        self.removed=True
Register the `Hooks` on `self.modules`.
def on_train_begin(self, **kwargs): "Register the `Hooks` on `self.modules`." if not self.modules: self.modules = [m for m in flatten_model(self.learn.model) if hasattr(m, 'weight')] self.hooks = Hooks(self.modules, self.hook)
Take the mean and std of `o`.
def hook(self, m:nn.Module, i:Tensors, o:Tensors)->Tuple[Rank0Tensor,Rank0Tensor]:
    "Take the mean and std of `o`."
    return o.mean().item(),o.std().item()
Take the stored results and put them in `self.stats`.
def on_batch_end(self, train, **kwargs):
    "Take the stored results and put them in `self.stats`."
    if train: self.stats.append(self.hooks.stored)
Plots images given image files. Arguments: imspaths (list): list of paths; figsize (tuple): figure size; rows (int): number of rows; titles (list): list of titles; maintitle (string): main title
def plots_from_files(imspaths, figsize=(10,5), rows=1, titles=None, maintitle=None):
    """Plots images given image files.

    Arguments:
        imspaths (list): list of paths
        figsize (tuple): figure size
        rows (int): number of rows
        titles (list): list of titles
        maintitle (string): main title
    """
    ...
Displays the images and their probabilities of belonging to a certain class Arguments: idxs (numpy.ndarray): indexes of the image samples from the dataset y (int): the selected class Returns: Plots the images in n rows [rows = n]
def plot_val_with_title(self, idxs, y):
    """
    Displays the images and their probabilities of belonging to a certain class

    Arguments:
        idxs (numpy.ndarray): indexes of the image samples from the dataset
        y (int): the selected class

    Returns:
        Pl...
Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities Arguments: mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False every...
def most_by_mask(self, mask, y, mult):
    """
    Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities

    Arguments:
        mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with
            shape (num_of_samples,) which contain...
Extracts the first 4 most uncertain indexes from the ordered list of probabilities Arguments: mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere el...
def most_uncertain_by_mask(self, mask, y):
    """
    Extracts the first 4 most uncertain indexes from the ordered list of probabilities

    Arguments:
        mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with
            shape (num_of_samples,) which contains Tr...
Extracts the predicted classes which correspond to the selected class (y) and to the specific case (prediction is correct - is_correct=True, prediction is wrong - is_correct=False). Arguments: y (int): the selected class; is_correct (boolean): a boolean flag (True, False) which spec...
def most_by_correct(self, y, is_correct):
    """
    Extracts the predicted classes which correspond to the selected class (y) and to the specific case
    (prediction is correct - is_correct=True, prediction is wrong - is_correct=False)

    Arguments:
        y (int): the selected class
        is_corr...