Dataset schema (column : type : observed range):

repo : string : length 1 to 99
file : string : length 13 to 215
code : string : length 12 to 59.2M
file_length : int64 : 12 to 59.2M
avg_line_length : float64 : 3.82 to 1.48M
max_line_length : int64 : 12 to 2.51M
extension_type : string : 1 class (py)
repo: imgclsmob
file: imgclsmob-master/pytorch/metrics/cls_metrics.py
""" Evaluation Metrics for Image Classification. """ import numpy as np import torch from .metric import EvalMetric __all__ = ['Top1Error', 'TopKError'] class Accuracy(EvalMetric): """ Computes accuracy classification score. Parameters: ---------- axis : int, default 1 The axis that represents classes name : str, default 'accuracy' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, axis=1, name="accuracy", output_names=None, label_names=None): super(Accuracy, self).__init__( name, axis=axis, output_names=output_names, label_names=label_names, has_global_stats=True) self.axis = axis def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data with class indices as values, one per sample. preds : torch.Tensor Prediction values for samples. Each prediction value can either be the class index, or a vector of likelihoods for all classes. """ assert (len(labels) == len(preds)) with torch.no_grad(): if preds.shape != labels.shape: pred_label = torch.argmax(preds, dim=self.axis) else: pred_label = preds pred_label = pred_label.cpu().numpy().astype(np.int32) label = labels.cpu().numpy().astype(np.int32) label = label.flat pred_label = pred_label.flat num_correct = (pred_label == label).sum() self.sum_metric += num_correct self.global_sum_metric += num_correct self.num_inst += len(pred_label) self.global_num_inst += len(pred_label) class TopKAccuracy(EvalMetric): """ Computes top k predictions accuracy. Parameters: ---------- top_k : int, default 1 Whether targets are in top k predictions. name : str, default 'top_k_accuracy' Name of this metric instance for display. torch_like : bool, default True Whether to use pytorch-like algorithm. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, top_k=1, name="top_k_accuracy", torch_like=True, output_names=None, label_names=None): super(TopKAccuracy, self).__init__( name, top_k=top_k, output_names=output_names, label_names=label_names, has_global_stats=True) self.top_k = top_k assert (self.top_k > 1), "Please use Accuracy if top_k is no more than 1" self.name += "_{:d}".format(self.top_k) self.torch_like = torch_like def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data. preds : torch.Tensor Predicted values. 
""" assert (len(labels) == len(preds)) with torch.no_grad(): if self.torch_like: _, pred = preds.topk(k=self.top_k, dim=1, largest=True, sorted=True) pred = pred.t() correct = pred.eq(labels.view(1, -1).expand_as(pred)) # num_correct = correct.view(-1).float().sum(dim=0, keepdim=True).item() num_correct = correct.flatten().float().sum(dim=0, keepdim=True).item() num_samples = labels.size(0) assert (num_correct <= num_samples) self.sum_metric += num_correct self.global_sum_metric += num_correct self.num_inst += num_samples self.global_num_inst += num_samples else: assert(len(preds.shape) <= 2), "Predictions should be no more than 2 dims" pred_label = preds.cpu().numpy().astype(np.int32) pred_label = np.argpartition(pred_label, -self.top_k) label = labels.cpu().numpy().astype(np.int32) assert (len(label) == len(pred_label)) num_samples = pred_label.shape[0] num_dims = len(pred_label.shape) if num_dims == 1: num_correct = (pred_label.flat == label.flat).sum() self.sum_metric += num_correct self.global_sum_metric += num_correct elif num_dims == 2: num_classes = pred_label.shape[1] top_k = min(num_classes, self.top_k) for j in range(top_k): num_correct = (pred_label[:, num_classes - 1 - j].flat == label.flat).sum() self.sum_metric += num_correct self.global_sum_metric += num_correct self.num_inst += num_samples self.global_num_inst += num_samples class Top1Error(Accuracy): """ Computes top-1 error (inverted accuracy classification score). Parameters: ---------- axis : int, default 1 The axis that represents classes. name : str, default 'top_1_error' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, axis=1, name="top_1_error", output_names=None, label_names=None): super(Top1Error, self).__init__( axis=axis, name=name, output_names=output_names, label_names=label_names) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.num_inst == 0: return self.name, float("nan") else: return self.name, 1.0 - self.sum_metric / self.num_inst class TopKError(TopKAccuracy): """ Computes top-k error (inverted top k predictions accuracy). Parameters: ---------- top_k : int Whether targets are out of top k predictions, default 1 name : str, default 'top_k_error' Name of this metric instance for display. torch_like : bool, default True Whether to use pytorch-like algorithm. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, top_k=1, name="top_k_error", torch_like=True, output_names=None, label_names=None): name_ = name super(TopKError, self).__init__( top_k=top_k, name=name, torch_like=torch_like, output_names=output_names, label_names=label_names) self.name = name_.replace("_k_", "_{}_".format(top_k)) def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. 
""" if self.num_inst == 0: return self.name, float("nan") else: return self.name, 1.0 - self.sum_metric / self.num_inst
file_length: 8,783 | avg_line_length: 33.996016 | max_line_length: 99 | extension_type: py
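A minimal smoke test for these classification metrics (hypothetical tensors and import path; assumes the `metrics` package above is importable):

import torch
from metrics.cls_metrics import Top1Error, TopKError

# Two samples, three classes; the second sample is misclassified.
logits = torch.tensor([[0.10, 0.70, 0.20],
                       [0.80, 0.15, 0.05]])
labels = torch.tensor([1, 2])

top1 = Top1Error()
top1.update(labels, logits)
print(top1.get())   # ('top_1_error', 0.5)

top2 = TopKError(top_k=2)   # name becomes 'top_2_error'
top2.update(labels, logits)
print(top2.get())   # ('top_2_error', 0.5): class 2 is not in sample 2's top 2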
repo: imgclsmob
file: imgclsmob-master/pytorch/metrics/det_metrics.py
""" Evaluation Metrics for Object Detection. """ import warnings import numpy as np import mxnet as mx __all__ = ['CocoDetMApMetric'] class CocoDetMApMetric(mx.metric.EvalMetric): """ Detection metric for COCO bbox task. Parameters: ---------- img_height : int Processed image height. coco_annotations_file_path : str COCO anotation file path. contiguous_id_to_json : list of int Processed IDs. validation_ids : bool, default False Whether to use temporary file for estimation. use_file : bool, default False Whether to use temporary file for estimation. score_thresh : float, default 0.05 Detection results with confident scores smaller than `score_thresh` will be discarded before saving to results. data_shape : tuple of int, default is None If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions. This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must be fixed for all validation images. post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h) If not None, the bounding boxes will be affine transformed rather than simply scaled. name : str, default 'mAP' Name of this metric instance for display. """ def __init__(self, img_height, coco_annotations_file_path, contiguous_id_to_json, validation_ids=None, use_file=False, score_thresh=0.05, data_shape=None, post_affine=None, name="mAP"): super(CocoDetMApMetric, self).__init__(name=name) self.img_height = img_height self.coco_annotations_file_path = coco_annotations_file_path self.contiguous_id_to_json = contiguous_id_to_json self.validation_ids = validation_ids self.use_file = use_file self.score_thresh = score_thresh self.current_idx = 0 self.coco_result = [] if isinstance(data_shape, (tuple, list)): assert len(data_shape) == 2, "Data shape must be (height, width)" elif not data_shape: data_shape = None else: raise ValueError("data_shape must be None or tuple of int as (height, width)") self._data_shape = data_shape if post_affine is not None: assert self._data_shape is not None, "Using post affine transform requires data_shape" self._post_affine = post_affine else: self._post_affine = None from pycocotools.coco import COCO self.gt = COCO(self.coco_annotations_file_path) self._img_ids = sorted(self.gt.getImgIds()) def reset(self): self.current_idx = 0 self.coco_result = [] def get(self): """ Get evaluation metrics. """ if self.current_idx != len(self._img_ids): warnings.warn("Recorded {} out of {} validation images, incomplete results".format( self.current_idx, len(self._img_ids))) from pycocotools.coco import COCO gt = COCO(self.coco_annotations_file_path) import tempfile import json with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f: json.dump(self.coco_result, f) f.flush() pred = gt.loadRes(f.name) from pycocotools.cocoeval import COCOeval coco_eval = COCOeval(gt, pred, "bbox") if self.validation_ids is not None: coco_eval.params.imgIds = self.validation_ids coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return self.name, tuple(coco_eval.stats[:3]) def update2(self, pred_bboxes, pred_labels, pred_scores): """ Update internal buffer with latest predictions. Note that the statistics are not available until you call self.get() to return the metrics. Parameters: ---------- pred_bboxes : mxnet.NDArray or numpy.ndarray Prediction bounding boxes with shape `B, N, 4`. Where B is the size of mini-batch, N is the number of bboxes. 
pred_labels : mxnet.NDArray or numpy.ndarray Prediction bounding boxes labels with shape `B, N`. pred_scores : mxnet.NDArray or numpy.ndarray Prediction bounding boxes scores with shape `B, N`. """ def as_numpy(a): """ Convert a (list of) mx.NDArray into numpy.ndarray """ if isinstance(a, (list, tuple)): out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a] return np.concatenate(out, axis=0) elif isinstance(a, mx.nd.NDArray): a = a.asnumpy() return a for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]): valid_pred = np.where(pred_label.flat >= 0)[0] pred_bbox = pred_bbox[valid_pred, :].astype(np.float) pred_label = pred_label.flat[valid_pred].astype(int) pred_score = pred_score.flat[valid_pred].astype(np.float) imgid = self._img_ids[self.current_idx] self.current_idx += 1 affine_mat = None if self._data_shape is not None: entry = self.gt.loadImgs(imgid)[0] orig_height = entry["height"] orig_width = entry["width"] height_scale = float(orig_height) / self._data_shape[0] width_scale = float(orig_width) / self._data_shape[1] if self._post_affine is not None: affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0]) else: height_scale, width_scale = (1.0, 1.0) # for each bbox detection in each image for bbox, label, score in zip(pred_bbox, pred_label, pred_score): if label not in self.contiguous_id_to_json: # ignore non-exist class continue if score < self.score_thresh: continue category_id = self.contiguous_id_to_json[label] # rescale bboxes/affine transform bboxes if affine_mat is not None: bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat) bbox[2:4] = self.affine_transform(bbox[2:4], affine_mat) else: bbox[[0, 2]] *= width_scale bbox[[1, 3]] *= height_scale # convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h] bbox[2:4] -= (bbox[:2] - 1) self.coco_result.append({"image_id": imgid, "category_id": category_id, "bbox": bbox[:4].tolist(), "score": score}) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data. preds : torch.Tensor Predicted values. """ assert (labels is not None) # label = labels.cpu().detach().numpy() pred = preds.cpu().detach().numpy() det_bboxes = [] det_ids = [] det_scores = [] bboxes = pred[:, :, :4] ids = pred[:, :, 4] scores = pred[:, :, 5] det_ids.append(ids) det_scores.append(scores) det_bboxes.append(bboxes.clip(0, self.img_height)) self.update2(det_bboxes, det_ids, det_scores) @staticmethod def affine_transform(pt, t): """ Apply affine transform to a bounding box given transform matrix t. Parameters: ---------- pt : numpy.ndarray Bounding box with shape (1, 2). t : numpy.ndarray Transformation matrix with shape (2, 3). Returns: ------- numpy.ndarray New bounding box with shape (1, 2). """ new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2]
file_length: 8,548 | avg_line_length: 36.495614 | max_line_length: 119 | extension_type: py
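Since `update2` mutates boxes in place, the coordinate bookkeeping is worth spelling out. A standalone sketch (plain numpy, hypothetical values) of the rescale plus [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h] conversion it applies before appending a COCO result:

import numpy as np

bbox = np.array([50.0, 40.0, 150.0, 120.0])   # network-space [xmin, ymin, xmax, ymax]
width_scale, height_scale = 2.0, 2.0          # original image is 2x the network input

bbox[[0, 2]] *= width_scale                   # rescale x coordinates
bbox[[1, 3]] *= height_scale                  # rescale y coordinates
bbox[2:4] -= (bbox[:2] - 1)                   # to [xmin, ymin, w, h], as in update2
print(bbox.tolist())                          # [100.0, 80.0, 201.0, 161.0]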
repo: imgclsmob
file: imgclsmob-master/pytorch/metrics/hpe_metrics.py
""" Evaluation Metrics for Human Pose Estimation. """ from .metric import EvalMetric __all__ = ['CocoHpeOksApMetric'] class CocoHpeOksApMetric(EvalMetric): """ Detection metric for COCO Keypoint task. Parameters: ---------- coco_annotations_file_path : str COCO anotation file path. pose_postprocessing_fn : func An function for pose post-processing. use_file : bool, default False Whether to use temporary file for estimation. validation_ids : bool, default False Whether to use temporary file for estimation. name : str, default 'CocoOksAp' Name of this metric instance for display. """ def __init__(self, coco_annotations_file_path, pose_postprocessing_fn, validation_ids=None, use_file=False, name="CocoOksAp"): super(CocoHpeOksApMetric, self).__init__(name=name) self.coco_annotations_file_path = coco_annotations_file_path self.pose_postprocessing_fn = pose_postprocessing_fn self.validation_ids = validation_ids self.use_file = use_file self.coco_result = [] def reset(self): self.coco_result = [] def get(self): """ Get evaluation metrics. """ import copy from pycocotools.coco import COCO gt = COCO(self.coco_annotations_file_path) if self.use_file: import tempfile import json with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f: json.dump(self.coco_result, f) f.flush() pred = gt.loadRes(f.name) else: def calc_pred(coco, anns): import numpy as np import copy pred = COCO() pred.dataset["images"] = [img for img in coco.dataset["images"]] annsImgIds = [ann["image_id"] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(coco.getImgIds())) pred.dataset["categories"] = copy.deepcopy(coco.dataset["categories"]) for id, ann in enumerate(anns): s = ann["keypoints"] x = s[0::3] y = s[1::3] x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann["area"] = (x1 - x0) * (y1 - y0) ann["id"] = id + 1 ann["bbox"] = [x0, y0, x1 - x0, y1 - y0] pred.dataset["annotations"] = anns pred.createIndex() return pred pred = calc_pred(gt, copy.deepcopy(self.coco_result)) from pycocotools.cocoeval import COCOeval coco_eval = COCOeval(gt, pred, "keypoints") if self.validation_ids is not None: coco_eval.params.imgIds = self.validation_ids coco_eval.params.useSegm = None coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return self.name, tuple(coco_eval.stats[:3]) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data. preds : torch.Tensor Predicted values. """ label = labels.cpu().detach().numpy() pred = preds.cpu().detach().numpy() pred_pts_score, pred_person_score, label_img_id = self.pose_postprocessing_fn(pred, label) for idx in range(len(pred_pts_score)): image_id = int(label_img_id[idx]) kpt = pred_pts_score[idx].flatten().tolist() score = float(pred_person_score[idx]) self.coco_result.append({ "image_id": image_id, "category_id": 1, "keypoints": kpt, "score": score})
file_length: 3,966 | avg_line_length: 32.058333 | max_line_length: 98 | extension_type: py
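The `calc_pred` helper above synthesizes each annotation's `area` and `bbox` from its keypoint triples so that `COCOeval` can consume in-memory results. A standalone sketch of that derivation (hypothetical keypoints; builtins stand in for the numpy min/max used above):

keypoints = [120.0, 60.0, 2, 140.0, 62.0, 2, 130.0, 100.0, 1]   # (x, y, visibility) * 3 joints
x = keypoints[0::3]
y = keypoints[1::3]
x0, x1, y0, y1 = min(x), max(x), min(y), max(y)
area = (x1 - x0) * (y1 - y0)
bbox = [x0, y0, x1 - x0, y1 - y0]
print(area, bbox)   # 800.0 [120.0, 60.0, 20.0, 40.0]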
repo: imgclsmob
file: imgclsmob-master/pytorch/metrics/asr_metrics.py
""" Evaluation Metrics for Automatic Speech Recognition (ASR). """ from .metric import EvalMetric __all__ = ['WER'] class WER(EvalMetric): """ Computes Word Error Rate (WER) for Automatic Speech Recognition (ASR). Parameters: ---------- vocabulary : list of str Vocabulary of the dataset. name : str, default 'wer' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, vocabulary, name="wer", output_names=None, label_names=None): super(WER, self).__init__( name=name, output_names=output_names, label_names=label_names, has_global_stats=True) self.vocabulary = vocabulary self.ctc_decoder = CtcDecoder(vocabulary=vocabulary) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data with class indices as values, one per sample. preds : torch.Tensor Prediction values for samples. Each prediction value can either be the class index, or a vector of likelihoods for all classes. """ import editdistance labels_code = labels.cpu().numpy() labels = [] for label_code in labels_code: label_text = "".join([self.ctc_decoder.labels_map[c] for c in label_code]) labels.append(label_text) preds = preds[0] greedy_predictions = preds.transpose(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).cpu().numpy() preds = self.ctc_decoder(greedy_predictions) assert (len(labels) == len(preds)) for pred, label in zip(preds, labels): pred = pred.split() label = label.split() word_error_count = editdistance.eval(label, pred) word_count = max(len(label), len(pred)) assert (word_error_count <= word_count) self.sum_metric += word_error_count self.global_sum_metric += word_error_count self.num_inst += word_count self.global_num_inst += word_count class CtcDecoder(object): """ CTC decoder (to decode a sequence of labels to words). Parameters: ---------- vocabulary : list of str Vocabulary of the dataset. """ def __init__(self, vocabulary): super().__init__() self.blank_id = len(vocabulary) self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))]) def __call__(self, predictions): """ Decode a sequence of labels to words. Parameters: ---------- predictions : np.array of int or list of list of int Tensor with predicted labels. Returns: ------- list of str Words. """ hypotheses = [] for prediction in predictions: decoded_prediction = [] previous = self.blank_id for p in prediction: if (p != previous or previous == self.blank_id) and p != self.blank_id: decoded_prediction.append(p) previous = p hypothesis = "".join([self.labels_map[c] for c in decoded_prediction]) hypotheses.append(hypothesis) return hypotheses
file_length: 3,814 | avg_line_length: 30.528926 | max_line_length: 114 | extension_type: py
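A quick check of the CTC greedy-decoding rule implemented by `CtcDecoder` (toy vocabulary; the blank id equals `len(vocabulary)`; assumes the module above is importable as `metrics.asr_metrics`):

from metrics.asr_metrics import CtcDecoder

decoder = CtcDecoder(vocabulary=["a", "b", "c"])   # blank_id == 3

# Repeated labels collapse; blanks separate repeats and are dropped.
predictions = [[0, 0, 3, 0, 1, 1, 3, 2]]
print(decoder(predictions))   # ['aabc']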
repo: imgclsmob
file: imgclsmob-master/pytorch/metrics/metric.py
""" Several base metrics. """ __all__ = ['EvalMetric', 'CompositeEvalMetric', 'check_label_shapes'] from collections import OrderedDict def check_label_shapes(labels, preds, shape=False): """ Helper function for checking shape of label and prediction. Parameters: ---------- labels : list of torch.Tensor The labels of the data. preds : list of torch.Tensor Predicted values. shape : boolean If True, check the shape of labels and preds, otherwise only check their length. """ if not shape: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of predictions {}".format(label_shape, pred_shape)) class EvalMetric(object): """ Base class for all evaluation metrics. Parameters: ---------- name : str Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, name, output_names=None, label_names=None, **kwargs): super(EvalMetric, self).__init__() self.name = str(name) self.output_names = output_names self.label_names = label_names self._has_global_stats = kwargs.pop("has_global_stats", False) self._kwargs = kwargs self.reset() def __str__(self): return "EvalMetric: {}".format(dict(self.get_name_value())) def get_config(self): """ Save configurations of metric. Can be recreated from configs with metric.create(**config). """ config = self._kwargs.copy() config.update({ "metric": self.__class__.__name__, "name": self.name, "output_names": self.output_names, "label_names": self.label_names}) return config def update_dict(self, label, pred): """ Update the internal evaluation with named label and pred. Parameters: ---------- labels : OrderedDict of str -> torch.Tensor name to array mapping for labels. preds : OrderedDict of str -> torch.Tensor name to array mapping of predicted outputs. """ if self.output_names is not None: pred = [pred[name] for name in self.output_names] else: pred = list(pred.values()) if self.label_names is not None: label = [label[name] for name in self.label_names] else: label = list(label.values()) self.update(label, pred) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data. preds : torch.Tensor Predicted values. """ raise NotImplementedError() def reset(self): """ Resets the internal evaluation result to initial state. """ self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0 def reset_local(self): """ Resets the local portion of the internal evaluation results to initial state. """ self.num_inst = 0 self.sum_metric = 0.0 def get(self): """ Gets the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.num_inst == 0: return self.name, float("nan") else: return self.name, self.sum_metric / self.num_inst def get_global(self): """ Gets the current global evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. 
""" if self._has_global_stats: if self.global_num_inst == 0: return self.name, float("nan") else: return self.name, self.global_sum_metric / self.global_num_inst else: return self.get() def get_name_value(self): """ Returns zipped name and value pairs. Returns: ------- list of tuples A (name, value) tuple list. """ name, value = self.get() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) def get_global_name_value(self): """ Returns zipped name and value pairs for global results. Returns: ------- list of tuples A (name, value) tuple list. """ if self._has_global_stats: name, value = self.get_global() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) else: return self.get_name_value() class CompositeEvalMetric(EvalMetric): """ Manages multiple evaluation metrics. Parameters: ---------- name : str, default 'composite' Name of this metric instance for display. output_names : list of str, or None, default None Name of predictions that should be used when updating with update_dict. By default include all predictions. label_names : list of str, or None, default None Name of labels that should be used when updating with update_dict. By default include all labels. """ def __init__(self, name="composite", output_names=None, label_names=None): super(CompositeEvalMetric, self).__init__( name, output_names=output_names, label_names=label_names, has_global_stats=True) self.metrics = [] def add(self, metric): """ Adds a child metric. Parameters: ---------- metric A metric instance. """ self.metrics.append(metric) def update_dict(self, labels, preds): if self.label_names is not None: labels = OrderedDict([i for i in labels.items() if i[0] in self.label_names]) if self.output_names is not None: preds = OrderedDict([i for i in preds.items() if i[0] in self.output_names]) for metric in self.metrics: metric.update_dict(labels, preds) def update(self, labels, preds): """ Updates the internal evaluation result. Parameters: ---------- labels : torch.Tensor The labels of the data. preds : torch.Tensor Predicted values. """ for metric in self.metrics: metric.update(labels, preds) def reset(self): """ Resets the internal evaluation result to initial state. """ try: for metric in self.metrics: metric.reset() except AttributeError: pass def reset_local(self): """ Resets the local portion of the internal evaluation results to initial state. """ try: for metric in self.metrics: metric.reset_local() except AttributeError: pass def get(self): """ Returns the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ names = [] values = [] for metric in self.metrics: name, value = metric.get() name = [name] value = [value] names.extend(name) values.extend(value) return names, values def get_global(self): """ Returns the current evaluation result. Returns: ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ names = [] values = [] for metric in self.metrics: name, value = metric.get_global() name = [name] value = [value] names.extend(name) values.extend(value) return names, values def get_config(self): config = super(CompositeEvalMetric, self).get_config() config.update({"metrics": [i.get_config() for i in self.metrics]}) return config
file_length: 9,289 | avg_line_length: 27.323171 | max_line_length: 117 | extension_type: py
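A minimal sketch of how these bases compose, using a hypothetical toy metric (not part of the repo) that accumulates mean absolute error through `EvalMetric`'s counters; the import path is assumed:

import torch
from metrics.metric import EvalMetric, CompositeEvalMetric

class MeanAbsError(EvalMetric):
    # Toy metric: mean absolute error between preds and labels.
    def __init__(self, name="mae"):
        super(MeanAbsError, self).__init__(name, has_global_stats=True)

    def update(self, labels, preds):
        err = (preds - labels).abs().sum().item()
        self.sum_metric += err
        self.global_sum_metric += err
        self.num_inst += labels.numel()
        self.global_num_inst += labels.numel()

composite = CompositeEvalMetric()
composite.add(MeanAbsError())
composite.update(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 1.0]))
print(composite.get_name_value())   # [('mae', 0.75)]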
repo: imgclsmob
file: imgclsmob-master/pytorch/datasets/imagenet1k_cls_dataset.py
""" ImageNet-1K classification dataset. """ import os import math import cv2 import numpy as np from PIL import Image from torchvision.datasets import ImageFolder import torchvision.transforms as transforms from .dataset_metainfo import DatasetMetaInfo class ImageNet1K(ImageFolder): """ ImageNet-1K classification dataset. Parameters: ---------- root : str, default '~/.torch/datasets/imagenet' Path to the folder stored the dataset. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and label and transforms them. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "imagenet"), mode="train", transform=None): split = "train" if mode == "train" else "val" root = os.path.join(root, split) super(ImageNet1K, self).__init__(root=root, transform=transform) class ImageNet1KMetaInfo(DatasetMetaInfo): """ Descriptor of ImageNet-1K dataset. """ def __init__(self): super(ImageNet1KMetaInfo, self).__init__() self.label = "ImageNet1K" self.short_label = "imagenet" self.root_dir_name = "imagenet" self.dataset_class = ImageNet1K self.num_training_samples = None self.in_channels = 3 self.num_classes = 1000 self.input_image_size = (224, 224) self.resize_inv_factor = 0.875 self.train_metric_capts = ["Train.Top1"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err-top1"}] self.val_metric_capts = ["Val.Top1", "Val.Top5"] self.val_metric_names = ["Top1Error", "TopKError"] self.val_metric_extra_kwargs = [{"name": "err-top1"}, {"name": "err-top5", "top_k": 5}] self.saver_acc_ind = 1 self.train_transform = imagenet_train_transform self.val_transform = imagenet_val_transform self.test_transform = imagenet_val_transform self.ml_type = "imgcls" self.use_cv_resize = False self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.interpolation = Image.BILINEAR def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(ImageNet1KMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, default=self.input_image_size[0], help="size of the input for model") parser.add_argument( "--resize-inv-factor", type=float, default=self.resize_inv_factor, help="inverted ratio for input image crop") parser.add_argument( "--use-cv-resize", action="store_true", help="use OpenCV resize preprocessing") parser.add_argument( "--mean-rgb", nargs=3, type=float, default=self.mean_rgb, help="Mean of RGB channels in the dataset") parser.add_argument( "--std-rgb", nargs=3, type=float, default=self.std_rgb, help="STD of RGB channels in the dataset") parser.add_argument( "--interpolation", type=int, default=self.interpolation, help="Preprocessing interpolation") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(ImageNet1KMetaInfo, self).update(args) self.input_image_size = (args.input_size, args.input_size) self.use_cv_resize = args.use_cv_resize self.mean_rgb = args.mean_rgb self.std_rgb = args.std_rgb self.interpolation = args.interpolation def imagenet_train_transform(ds_metainfo, jitter_param=0.4): """ Create image transform sequence for training subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. 
jitter_param : float How much to jitter values. Returns: ------- Compose Image transform sequence. """ input_image_size = ds_metainfo.input_image_size return transforms.Compose([ transforms.RandomResizedCrop(size=input_image_size, interpolation=ds_metainfo.interpolation), transforms.RandomHorizontalFlip(), transforms.ColorJitter( brightness=jitter_param, contrast=jitter_param, saturation=jitter_param), transforms.ToTensor(), transforms.Normalize( mean=ds_metainfo.mean_rgb, std=ds_metainfo.std_rgb) ]) def imagenet_val_transform(ds_metainfo): """ Create image transform sequence for validation subset. Parameters: ---------- ds_metainfo : DatasetMetaInfo ImageNet-1K dataset metainfo. Returns: ------- Compose Image transform sequence. """ input_image_size = ds_metainfo.input_image_size resize_value = calc_val_resize_value( input_image_size=ds_metainfo.input_image_size, resize_inv_factor=ds_metainfo.resize_inv_factor) return transforms.Compose([ CvResize(size=resize_value, interpolation=ds_metainfo.interpolation) if ds_metainfo.use_cv_resize else transforms.Resize(size=resize_value, interpolation=ds_metainfo.interpolation), transforms.CenterCrop(size=input_image_size), transforms.ToTensor(), transforms.Normalize( mean=ds_metainfo.mean_rgb, std=ds_metainfo.std_rgb) ]) class CvResize(object): """ Resize the input PIL Image to the given size via OpenCV. Parameters: ---------- size : int or tuple of (W, H) Size of output image. interpolation : int, default PIL.Image.BILINEAR Interpolation method for resizing. By default uses bilinear interpolation. """ def __init__(self, size, interpolation=Image.BILINEAR): self.size = size self.interpolation = interpolation def __call__(self, img): """ Resize image. Parameters: ---------- img : PIL.Image input image. Returns: ------- PIL.Image Resulted image. """ if self.interpolation == Image.NEAREST: cv_interpolation = cv2.INTER_NEAREST elif self.interpolation == Image.BILINEAR: cv_interpolation = cv2.INTER_LINEAR elif self.interpolation == Image.BICUBIC: cv_interpolation = cv2.INTER_CUBIC elif self.interpolation == Image.LANCZOS: cv_interpolation = cv2.INTER_LANCZOS4 else: raise ValueError() cv_img = np.array(img) if isinstance(self.size, int): w, h = img.size if (w <= h and w == self.size) or (h <= w and h == self.size): return img if w < h: out_size = (self.size, int(self.size * h / w)) else: out_size = (int(self.size * w / h), self.size) cv_img = cv2.resize(cv_img, dsize=out_size, interpolation=cv_interpolation) return Image.fromarray(cv_img) else: cv_img = cv2.resize(cv_img, dsize=self.size, interpolation=cv_interpolation) return Image.fromarray(cv_img) def calc_val_resize_value(input_image_size=(224, 224), resize_inv_factor=0.875): """ Calculate image resize value for validation subset. Parameters: ---------- input_image_size : tuple of 2 int Main script arguments. resize_inv_factor : float Resize inverted factor. Returns: ------- int Resize value. """ if isinstance(input_image_size, int): input_image_size = (input_image_size, input_image_size) resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor)) return resize_value
file_length: 8,645 | avg_line_length: 30.44 | max_line_length: 110 | extension_type: py
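The `resize_inv_factor` convention above reproduces the standard ImageNet eval recipe: resize the shorter side so that the center crop covers 87.5% of it. A quick check of `calc_val_resize_value` (assumes the module is importable under the path shown):

from datasets.imagenet1k_cls_dataset import calc_val_resize_value

print(calc_val_resize_value((224, 224), 0.875))   # 256 (the classic 256 -> 224 crop)
print(calc_val_resize_value((299, 299), 0.875))   # 342 (Inception-style input)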
repo: imgclsmob
file: imgclsmob-master/pytorch/datasets/hpe_dataset.py
""" Keypoint detection (2D single human pose estimation) dataset. """ import copy import logging import random import cv2 import numpy as np import torch import torch.utils.data as data class HpeDataset(data.Dataset): def __init__(self, cfg, root, image_set, is_train, transform=None): self.num_joints = 0 self.pixel_std = 200 self.flip_pairs = [] self.parent_ids = [] self.is_train = is_train self.root = root self.image_set = image_set self.output_path = cfg.OUTPUT_DIR self.data_format = cfg.DATASET.DATA_FORMAT self.scale_factor = cfg.DATASET.SCALE_FACTOR self.rotation_factor = cfg.DATASET.ROT_FACTOR self.flip = cfg.DATASET.FLIP self.image_size = cfg.MODEL.IMAGE_SIZE self.target_type = 'gaussian' self.heatmap_size = cfg.MODEL.EXTRA.HEATMAP_SIZE self.sigma = cfg.MODEL.EXTRA.SIGMA self.transform = transform self.db = [] def _get_db(self): raise NotImplementedError def evaluate(self, cfg, preds, output_dir, *args, **kwargs): raise NotImplementedError def __len__(self,): return len(self.db) def __getitem__(self, idx): db_rec = copy.deepcopy(self.db[idx]) image_file = db_rec['image'] filename = db_rec['filename'] if 'filename' in db_rec else '' imgnum = db_rec['imgnum'] if 'imgnum' in db_rec else '' if self.data_format == 'zip': from utils import zipreader data_numpy = zipreader.imread( image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) else: data_numpy = cv2.imread( image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) if data_numpy is None: logging.error('=> fail to read {}'.format(image_file)) raise ValueError('Fail to read {}'.format(image_file)) joints = db_rec['joints_3d'] joints_vis = db_rec['joints_3d_vis'] c = db_rec['center'] s = db_rec['scale'] score = db_rec['score'] if 'score' in db_rec else 1 r = 0 if self.is_train: sf = self.scale_factor rf = self.rotation_factor s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf) r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0 if self.flip and random.random() <= 0.5: data_numpy = data_numpy[:, ::-1, :] joints, joints_vis = fliplr_joints(joints, joints_vis, data_numpy.shape[1], self.flip_pairs) c[0] = data_numpy.shape[1] - c[0] - 1 trans = get_affine_transform(c, s, r, self.image_size) input = cv2.warpAffine( data_numpy, trans, (int(self.image_size[0]), int(self.image_size[1])), flags=cv2.INTER_LINEAR) if self.transform: input = self.transform(input) for i in range(self.num_joints): if joints_vis[i, 0] > 0.0: joints[i, 0:2] = affine_transform(joints[i, 0:2], trans) target, target_weight = self.generate_target(joints, joints_vis) target = torch.from_numpy(target) target_weight = torch.from_numpy(target_weight) meta = { 'image': image_file, 'filename': filename, 'imgnum': imgnum, 'joints': joints, 'joints_vis': joints_vis, 'center': c, 'scale': s, 'rotation': r, 'score': score } return input, target, target_weight, meta def select_data(self, db): db_selected = [] for rec in db: num_vis = 0 joints_x = 0.0 joints_y = 0.0 for joint, joint_vis in zip( rec['joints_3d'], rec['joints_3d_vis']): if joint_vis[0] <= 0: continue num_vis += 1 joints_x += joint[0] joints_y += joint[1] if num_vis == 0: continue joints_x, joints_y = joints_x / num_vis, joints_y / num_vis area = rec['scale'][0] * rec['scale'][1] * (self.pixel_std**2) joints_center = np.array([joints_x, joints_y]) bbox_center = np.array(rec['center']) diff_norm2 = np.linalg.norm(joints_center - bbox_center, 2) ks = np.exp(-1.0 * (diff_norm2 ** 2) / (0.2 ** 2 * 2.0 * area)) metric = (0.2 / 16) * num_vis + 0.45 - 0.2 / 16 if ks > metric: 
db_selected.append(rec) logging.info('=> num db: {}'.format(len(db))) logging.info('=> num selected db: {}'.format(len(db_selected))) return db_selected def generate_target(self, joints, joints_vis): ''' :param joints: [num_joints, 3] :param joints_vis: [num_joints, 3] :return: target, target_weight(1: visible, 0: invisible) ''' target_weight = np.ones((self.num_joints, 1), dtype=np.float32) target_weight[:, 0] = joints_vis[:, 0] assert self.target_type == 'gaussian', 'Only support gaussian map now!' if self.target_type == 'gaussian': target = np.zeros((self.num_joints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32) tmp_size = self.sigma * 3 for joint_id in range(self.num_joints): feat_stride = self.image_size / self.heatmap_size mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5) mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5) # Check that any part of the gaussian is in-bounds ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \ or br[0] < 0 or br[1] < 0: # If not, just return the image as is target_weight[joint_id] = 0 continue # # Generate gaussian size = 2 * tmp_size + 1 x = np.arange(0, size, 1, np.float32) y = x[:, np.newaxis] x0 = y0 = size // 2 # The gaussian is not normalized, we want the center value to equal 1 g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2)) # Usable gaussian range g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0] g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1] # Image range img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0]) img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1]) v = target_weight[joint_id] if v > 0.5: target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \ g[g_y[0]:g_y[1], g_x[0]:g_x[1]] return target, target_weight def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0): if not isinstance(scale, np.ndarray) and not isinstance(scale, list): print(scale) scale = np.array([scale, scale]) scale_tmp = scale * 200.0 src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = np.pi * rot / 180 src_dir = get_dir([0, src_w * -0.5], rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = center + scale_tmp * shift src[1, :] = center + src_dir + scale_tmp * shift dst[0, :] = [dst_w * 0.5, dst_h * 0.5] dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans def get_3rd_point(a, b): direct = a - b return b + np.array([-direct[1], direct[0]], dtype=np.float32) def get_dir(src_point, rot_rad): sn, cs = np.sin(rot_rad), np.cos(rot_rad) src_result = [0, 0] src_result[0] = src_point[0] * cs - src_point[1] * sn src_result[1] = src_point[0] * sn + src_point[1] * cs return src_result def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.]).T new_pt = np.dot(t, new_pt) return new_pt[:2] def fliplr_joints(joints, joints_vis, width, matched_parts): """ flip coords """ # Flip horizontal joints[:, 0] = width - joints[:, 0] - 1 # Change left-right parts for pair in matched_parts: joints[pair[0], :], joints[pair[1], :] = joints[pair[1], :], 
joints[pair[0], :].copy() joints_vis[pair[0], :], joints_vis[pair[1], :] = joints_vis[pair[1], :], joints_vis[pair[0], :].copy() return joints * joints_vis, joints_vis
file_length: 9,597 | avg_line_length: 32.559441 | max_line_length: 110 | extension_type: py
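For reference, the unnormalized Gaussian blob that `generate_target` stamps onto each joint's heatmap can be reproduced standalone (plain numpy, hypothetical sigma):

import numpy as np

sigma = 2
tmp_size = sigma * 3
size = 2 * tmp_size + 1                       # 13x13 patch for sigma=2
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
print(g.shape, g[y0, x0])                     # (13, 13) 1.0 -- center value is exactly 1
print(round(float(g[y0, x0 + sigma]), 3))     # 0.607, one sigma from center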
repo: imgclsmob
file: imgclsmob-master/pytorch/datasets/coco_hpe1_dataset.py
""" COCO keypoint detection (2D single human pose estimation) dataset. """ import os import copy import cv2 import numpy as np import torch import torch.utils.data as data from .dataset_metainfo import DatasetMetaInfo class CocoHpe1Dataset(data.Dataset): """ COCO keypoint detection (2D single human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. splits : list of str, default ['person_keypoints_val2017'] Json annotations name. Candidates can be: person_keypoints_val2017, person_keypoints_train2017. check_centers : bool, default is False If true, will force check centers of bbox and keypoints, respectively. If centers are far away from each other, remove this label. skip_empty : bool, default is False Whether skip entire image if no valid label is found. Use `False` if this dataset is for validation to avoid COCO metric error. """ CLASSES = ["person"] KEYPOINTS = { 0: "nose", 1: "left_eye", 2: "right_eye", 3: "left_ear", 4: "right_ear", 5: "left_shoulder", 6: "right_shoulder", 7: "left_elbow", 8: "right_elbow", 9: "left_wrist", 10: "right_wrist", 11: "left_hip", 12: "right_hip", 13: "left_knee", 14: "right_knee", 15: "left_ankle", 16: "right_ankle" } SKELETON = [ [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] def __init__(self, root, mode="train", transform=None, splits=("person_keypoints_val2017",), check_centers=False, skip_empty=True): super(CocoHpe1Dataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self.transform = transform self.num_class = len(self.CLASSES) if isinstance(splits, str): splits = [splits] self._splits = splits self._coco = [] self._check_centers = check_centers self._skip_empty = skip_empty self.index_map = dict(zip(type(self).CLASSES, range(self.num_class))) self.json_id_to_contiguous = None self.contiguous_id_to_json = None self._items, self._labels = self._load_jsons() mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") self.annotations_file_path = annotations_file_path def __str__(self): detail = ",".join([str(s) for s in self._splits]) return self.__class__.__name__ + "(" + detail + ")" @property def classes(self): """ Category names. """ return type(self).CLASSES @property def num_joints(self): """ Dataset defined: number of joints provided. """ return 17 @property def joint_pairs(self): """ Joint pairs which defines the pairs of joint to be swapped when the image is flipped horizontally. """ return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]] @property def coco(self): """ Return pycocotools object for evaluation purposes. 
""" if not self._coco: raise ValueError("No coco objects found, dataset not initialized.") if len(self._coco) > 1: raise NotImplementedError( "Currently we don't support evaluating {} JSON files".format(len(self._coco))) return self._coco[0] def __len__(self): return len(self._items) def __getitem__(self, idx): img_path = self._items[idx] img_id = int(os.path.splitext(os.path.basename(img_path))[0]) label = copy.deepcopy(self._labels[idx]) # img = mx.image.imread(img_path, 1) # img = Image.open(img_path).convert("RGB") img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR) img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) if self.transform is not None: img, scale, center, score = self.transform(img, label) res_label = np.array([float(img_id)] + [float(score)] + list(center) + list(scale), np.float32) img = torch.from_numpy(img) res_label = torch.from_numpy(res_label) return img, res_label def _load_jsons(self): """ Load all image paths and labels from JSON annotation files into buffer. """ items = [] labels = [] from pycocotools.coco import COCO for split in self._splits: anno = os.path.join(self._root, "annotations", split) + ".json" _coco = COCO(anno) self._coco.append(_coco) classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())] if not classes == self.classes: raise ValueError("Incompatible category names with COCO: ") assert classes == self.classes json_id_to_contiguous = { v: k for k, v in enumerate(_coco.getCatIds())} if self.json_id_to_contiguous is None: self.json_id_to_contiguous = json_id_to_contiguous self.contiguous_id_to_json = { v: k for k, v in self.json_id_to_contiguous.items()} else: assert self.json_id_to_contiguous == json_id_to_contiguous # iterate through the annotations image_ids = sorted(_coco.getImgIds()) for entry in _coco.loadImgs(image_ids): dirname, filename = entry["coco_url"].split("/")[-2:] abs_path = os.path.join(self._root, dirname, filename) if not os.path.exists(abs_path): raise IOError("Image: {} not exists.".format(abs_path)) label = self._check_load_keypoints(_coco, entry) if not label: continue # num of items are relative to person, not image for obj in label: items.append(abs_path) labels.append(obj) return items, labels def _check_load_keypoints(self, coco, entry): """ Check and load ground-truth keypoints. 
""" ann_ids = coco.getAnnIds(imgIds=entry["id"], iscrowd=False) objs = coco.loadAnns(ann_ids) # check valid bboxes valid_objs = [] width = entry["width"] height = entry["height"] for obj in objs: contiguous_cid = self.json_id_to_contiguous[obj["category_id"]] if contiguous_cid >= self.num_class: # not class of interest continue if max(obj["keypoints"]) == 0: continue # convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height) # require non-zero box area if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin: continue # joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32) for i in range(self.num_joints): joints_3d[i, 0, 0] = obj["keypoints"][i * 3 + 0] joints_3d[i, 1, 0] = obj["keypoints"][i * 3 + 1] # joints_3d[i, 2, 0] = 0 visible = min(1, obj["keypoints"][i * 3 + 2]) joints_3d[i, :2, 1] = visible # joints_3d[i, 2, 1] = 0 if np.sum(joints_3d[:, 0, 1]) < 1: # no visible keypoint continue if self._check_centers: bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax)) kp_center, num_vis = self._get_keypoints_center_count(joints_3d) ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area) if (num_vis / 80.0 + 47 / 80.0) > ks: continue valid_objs.append({ "bbox": (xmin, ymin, xmax, ymax), "joints_3d": joints_3d }) if not valid_objs: if not self._skip_empty: # dummy invalid labels if no valid objects are found valid_objs.append({ "bbox": np.array([-1, -1, 0, 0]), "joints_3d": np.zeros((self.num_joints, 3, 2), dtype=np.float32) }) return valid_objs @staticmethod def _get_box_center_area(bbox): """ Get bbox center. """ c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0]) area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) return c, area @staticmethod def _get_keypoints_center_count(keypoints): """ Get geometric center of all keypoints. """ keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0)) keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0)) num = float(np.sum(keypoints[:, 0, 1])) return np.array([keypoint_x / num, keypoint_y / num]), num @staticmethod def bbox_clip_xyxy(xyxy, width, height): """ Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary. All bounding boxes will be clipped to the new region `(0, 0, width, height)`. Parameters: ---------- xyxy : list, tuple or numpy.ndarray The bbox in format (xmin, ymin, xmax, ymax). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. width : int or float Boundary width. height : int or float Boundary height. Returns: ------- tuple or np.array Description of returned object. 
""" if isinstance(xyxy, (tuple, list)): if not len(xyxy) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy))) x1 = np.minimum(width - 1, np.maximum(0, xyxy[0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[3])) return x1, y1, x2, y2 elif isinstance(xyxy, np.ndarray): if not xyxy.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape)) x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3])) return np.hstack((x1, y1, x2, y2)) else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy))) @staticmethod def bbox_xywh_to_xyxy(xywh): """ Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax) Parameters: ---------- xywh : list, tuple or numpy.ndarray The bbox in format (x, y, w, h). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. Returns: ------- tuple or np.ndarray The converted bboxes in format (xmin, ymin, xmax, ymax). If input is numpy.ndarray, return is numpy.ndarray correspondingly. """ if isinstance(xywh, (tuple, list)): if not len(xywh) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh))) w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0) return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h elif isinstance(xywh, np.ndarray): if not xywh.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape)) xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1))) return xyxy else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh))) # --------------------------------------------------------------------------------------------------------------------- class CocoHpeValTransform1(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo self.image_size = self.ds_metainfo.input_image_size height = self.image_size[0] width = self.image_size[1] self.aspect_ratio = float(width / height) self.mean = ds_metainfo.mean_rgb self.std = ds_metainfo.std_rgb def __call__(self, src, label): bbox = label["bbox"] assert len(bbox) == 4 xmin, ymin, xmax, ymax = bbox center, scale = _box_to_center_scale(xmin, ymin, xmax - xmin, ymax - ymin, self.aspect_ratio) score = label.get("score", 1) h, w = self.image_size trans = get_affine_transform(center, scale, 0, [w, h]) # src_np = np.array(src) img = cv2.warpAffine(src, trans, (int(w), int(h)), flags=cv2.INTER_LINEAR) # img = mx.nd.image.to_tensor(mx.nd.array(img)) # img = mx.nd.image.normalize(img, mean=self.mean, std=self.std) img = img.astype(np.float32) img = img / 255.0 img = (img - np.array(self.mean, np.float32)) / np.array(self.std, np.float32) img = img.transpose((2, 0, 1)) return img, scale, center, score def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25): pixel_std = 1 center = np.zeros((2,), dtype=np.float32) center[0] = x + w * 0.5 center[1] = y + h * 0.5 if w > aspect_ratio * h: h = w / aspect_ratio elif w < aspect_ratio * h: w = h * aspect_ratio scale = np.array( [w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32) if center[0] != -1: scale = scale * scale_mult return center, scale def get_dir(src_point, rot_rad): sn, 
cs = np.sin(rot_rad), np.cos(rot_rad) src_result = [0, 0] src_result[0] = src_point[0] * cs - src_point[1] * sn src_result[1] = src_point[0] * sn + src_point[1] * cs return src_result def crop(img, center, scale, output_size, rot=0): trans = get_affine_transform(center, scale, rot, output_size) dst_img = cv2.warpAffine( img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img def get_3rd_point(a, b): direct = a - b return b + np.array([-direct[1], direct[0]], dtype=np.float32) def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0): if not isinstance(scale, np.ndarray) and not isinstance(scale, list): scale = np.array([scale, scale]) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = np.pi * rot / 180 src_dir = get_dir([0, src_w * -0.5], rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = center + scale_tmp * shift src[1, :] = center + src_dir + scale_tmp * shift dst[0, :] = [dst_w * 0.5, dst_h * 0.5] dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans # --------------------------------------------------------------------------------------------------------------------- class CocoHpeValTransform2(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo self.image_size = self.ds_metainfo.input_image_size height = self.image_size[0] width = self.image_size[1] self.aspect_ratio = float(width / height) self.mean = ds_metainfo.mean_rgb self.std = ds_metainfo.std_rgb def __call__(self, src, label): # print(src.shape) bbox = label["bbox"] assert len(bbox) == 4 score = label.get('score', 1) img, scale_box = detector_to_alpha_pose( src, class_ids=np.array([[0.]]), scores=np.array([[1.]]), bounding_boxs=np.array(np.array([bbox])), output_shape=self.image_size) if scale_box.shape[0] == 1: pt1 = np.array(scale_box[0, (0, 1)], dtype=np.float32) pt2 = np.array(scale_box[0, (2, 3)], dtype=np.float32) else: assert scale_box.shape[0] == 4 pt1 = np.array(scale_box[(0, 1)], dtype=np.float32) pt2 = np.array(scale_box[(2, 3)], dtype=np.float32) return img[0].astype(np.float32), pt1, pt2, score def detector_to_alpha_pose(img, class_ids, scores, bounding_boxs, output_shape=(256, 192), thr=0.5): boxes, scores = alpha_pose_detection_processor( img=img, boxes=bounding_boxs, class_idxs=class_ids, scores=scores, thr=thr) pose_input, upscale_bbox = alpha_pose_image_cropper( source_img=img, boxes=boxes, output_shape=output_shape) return pose_input, upscale_bbox def alpha_pose_detection_processor(img, boxes, class_idxs, scores, thr=0.5): if len(boxes.shape) == 3: boxes = boxes.squeeze(axis=0) if len(class_idxs.shape) == 3: class_idxs = class_idxs.squeeze(axis=0) if len(scores.shape) == 3: scores = scores.squeeze(axis=0) # cilp coordinates boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0., img.shape[1] - 1) boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0., img.shape[0] - 1) # select boxes mask1 = (class_idxs == 0).astype(np.int32) mask2 = (scores > thr).astype(np.int32) picked_idxs = np.where((mask1 + mask2) > 1)[0] if picked_idxs.shape[0] == 0: return None, None else: return boxes[picked_idxs], scores[picked_idxs] def 
alpha_pose_image_cropper(source_img, boxes, output_shape=(256, 192)): if boxes is None: return None, boxes # crop person poses img_width, img_height = source_img.shape[1], source_img.shape[0] tensors = np.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]]) out_boxes = np.zeros([boxes.shape[0], 4]) for i, box in enumerate(boxes): img = source_img.copy() box_width = box[2] - box[0] box_height = box[3] - box[1] if box_width > 100: scale_rate = 0.2 else: scale_rate = 0.3 # crop image left = int(max(0, box[0] - box_width * scale_rate / 2)) up = int(max(0, box[1] - box_height * scale_rate / 2)) right = int(min(img_width - 1, max(left + 5, box[2] + box_width * scale_rate / 2))) bottom = int(min(img_height - 1, max(up + 5, box[3] + box_height * scale_rate / 2))) crop_width = right - left if crop_width < 1: continue crop_height = bottom - up if crop_height < 1: continue ul = np.array((left, up)) br = np.array((right, bottom)) img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1]) img = img.astype(np.float32) img = img / 255.0 img = img.transpose((2, 0, 1)) # img = mx.nd.image.to_tensor(np.array(img)) # img = img.transpose((2, 0, 1)) img[0] = img[0] - 0.406 img[1] = img[1] - 0.457 img[2] = img[2] - 0.480 assert (img.shape[0] == 3) tensors[i] = img out_boxes[i] = (left, up, right, bottom) return tensors, out_boxes def cv_cropBox(img, ul, br, resH, resW, pad_val=0): ul = ul br = (br - 1) # br = br.int() lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW) lenW = lenH * resW / resH if img.ndim == 2: img = img[:, np.newaxis] box_shape = [br[1] - ul[1], br[0] - ul[0]] pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2] # Padding Zeros img[:ul[1], :, :], img[:, :ul[0], :] = pad_val, pad_val img[br[1] + 1:, :, :], img[:, br[0] + 1:, :] = pad_val, pad_val src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32) src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32) dst[0, :] = 0 dst[1, :] = np.array([resW - 1, resH - 1], np.float32) src[2:, :] = get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR) return dst_img # --------------------------------------------------------------------------------------------------------------------- def recalc_pose1(keypoints, bbs, image_size): def transform_preds(coords, center, scale, output_size): def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.]).T new_pt = np.dot(t, new_pt) return new_pt[:2] target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, inv=1) for p in range(coords.shape[0]): target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) return target_coords center = bbs[:, :2] scale = bbs[:, 2:4] heatmap_height = image_size[0] // 4 heatmap_width = image_size[1] // 4 output_size = [heatmap_width, heatmap_height] preds = np.zeros_like(keypoints) for i in range(keypoints.shape[0]): preds[i] = transform_preds(keypoints[i], center[i], scale[i], output_size) return preds def recalc_pose1b(pred, label, image_size, visible_conf_threshold=0.0): label_img_id = label[:, 0].astype(np.int32) label_score = label[:, 1] label_bbs = label[:, 2:6] pred_keypoints = pred[:, :, :2] pred_score = pred[:, :, 2] pred[:, :, :2] = recalc_pose1(pred_keypoints, label_bbs, image_size) 
pred_person_score = [] batch = pred_keypoints.shape[0] num_joints = pred_keypoints.shape[1] for idx in range(batch): kpt_score = 0 count = 0 for i in range(num_joints): mval = float(pred_score[idx][i]) if mval > visible_conf_threshold: kpt_score += mval count += 1 if count > 0: kpt_score /= count kpt_score = kpt_score * float(label_score[idx]) pred_person_score.append(kpt_score) return pred, pred_person_score, label_img_id def recalc_pose2(keypoints, bbs, image_size): def transformBoxInvert(pt, ul, br, resH, resW): center = np.zeros(2) center[0] = (br[0] - 1 - ul[0]) / 2 center[1] = (br[1] - 1 - ul[1]) / 2 lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW) lenW = lenH * resW / resH _pt = (pt * lenH) / resH if bool(((lenW - 1) / 2 - center[0]) > 0): _pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0]) if bool(((lenH - 1) / 2 - center[1]) > 0): _pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1]) new_point = np.zeros(2) new_point[0] = _pt[0] + ul[0] new_point[1] = _pt[1] + ul[1] return new_point pt2 = bbs[:, :2] pt1 = bbs[:, 2:4] heatmap_height = image_size[0] // 4 heatmap_width = image_size[1] // 4 preds = np.zeros_like(keypoints) for i in range(keypoints.shape[0]): for j in range(keypoints.shape[1]): preds[i, j] = transformBoxInvert(keypoints[i, j], pt1[i], pt2[i], heatmap_height, heatmap_width) return preds def recalc_pose2b(pred, label, image_size, visible_conf_threshold=0.0): label_img_id = label[:, 0].astype(np.int32) label_score = label[:, 1] label_bbs = label[:, 2:6] pred_keypoints = pred[:, :, :2] pred_score = pred[:, :, 2] pred[:, :, :2] = recalc_pose2(pred_keypoints, label_bbs, image_size) pred_person_score = [] batch = pred_keypoints.shape[0] num_joints = pred_keypoints.shape[1] for idx in range(batch): kpt_score = 0 count = 0 for i in range(num_joints): mval = float(pred_score[idx][i]) if mval > visible_conf_threshold: kpt_score += mval count += 1 if count > 0: kpt_score /= count kpt_score = kpt_score * float(label_score[idx]) pred_person_score.append(kpt_score) return pred, pred_person_score, label_img_id # --------------------------------------------------------------------------------------------------------------------- class CocoHpe1MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe1MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = CocoHpe1Dataset self.num_training_samples = None self.in_channels = 3 self.num_classes = CocoHpe1Dataset.classes self.input_image_size = (256, 192) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.CocoOksAp"] self.test_metric_names = ["CocoHpeOksApMetric"] self.test_metric_extra_kwargs = [ {"name": "OksAp", "coco_annotations_file_path": None, "use_file": False, "pose_postprocessing_fn": lambda x, y: recalc_pose1b(x, y, self.input_image_size)}] self.saver_acc_ind = 0 self.do_transform = True self.val_transform = CocoHpeValTransform1 self.test_transform = CocoHpeValTransform1 self.ml_type = "hpe" self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.model_type = 1 def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. 
""" super(CocoHpe1MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") parser.add_argument( "--model-type", type=int, default=self.model_type, help="model type (1=SimplePose, 2=AlphaPose)") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CocoHpe1MetaInfo, self).update(args) self.input_image_size = args.input_size self.model_type = args.model_type if self.model_type == 1: self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\ lambda x, y: recalc_pose1b(x, y, self.input_image_size) self.val_transform = CocoHpeValTransform1 self.test_transform = CocoHpeValTransform1 else: self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\ lambda x, y: recalc_pose2b(x, y, self.input_image_size) self.val_transform = CocoHpeValTransform2 self.test_transform = CocoHpeValTransform2 def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- args : obj A dataset class instance. """ self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
30,012
33.817865
119
py
imgclsmob
imgclsmob-master/pytorch/datasets/coco_det_dataset.py
""" MS COCO object detection dataset. """ import os import cv2 import logging import mxnet as mx import numpy as np from PIL import Image import torch.utils.data as data from .dataset_metainfo import DatasetMetaInfo __all__ = ['CocoDetMetaInfo'] class CocoDetDataset(data.Dataset): """ MS COCO detection dataset. Parameters: ---------- root : str Path to folder storing the dataset. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. splits : list of str, default ['instances_val2017'] Json annotations name. Candidates can be: instances_val2017, instances_train2017. min_object_area : float Minimum accepted ground-truth area, if an object's area is smaller than this value, it will be ignored. skip_empty : bool, default is True Whether skip images with no valid object. This should be `True` in training, otherwise it will cause undefined behavior. use_crowd : bool, default is True Whether use boxes labeled as crowd instance. """ CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] def __init__(self, root, mode="train", transform=None, splits=('instances_val2017',), min_object_area=0, skip_empty=True, use_crowd=True): super(CocoDetDataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self._transform = transform self.num_class = len(self.CLASSES) self._min_object_area = min_object_area self._skip_empty = skip_empty self._use_crowd = use_crowd if isinstance(splits, mx.base.string_types): splits = [splits] self._splits = splits self.index_map = dict(zip(type(self).CLASSES, range(self.num_class))) self.json_id_to_contiguous = None self.contiguous_id_to_json = None self._coco = [] self._items, self._labels, self._im_aspect_ratios = self._load_jsons() mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + "2017.json") self.annotations_file_path = annotations_file_path def __str__(self): detail = ','.join([str(s) for s in self._splits]) return self.__class__.__name__ + '(' + detail + ')' @property def coco(self): """ Return pycocotools object for evaluation purposes. """ if not self._coco: raise ValueError("No coco objects found, dataset not initialized.") if len(self._coco) > 1: raise NotImplementedError( "Currently we don't support evaluating {} JSON files. \ Please use single JSON dataset and evaluate one by one".format(len(self._coco))) return self._coco[0] @property def classes(self): """ Category names. """ return type(self).CLASSES @property def annotation_dir(self): """ The subdir for annotations. 
Default is 'annotations'(coco default) For example, a coco format json file will be searched as 'root/annotation_dir/xxx.json' You can override if custom dataset don't follow the same pattern """ return 'annotations' def get_im_aspect_ratio(self): """Return the aspect ratio of each image in the order of the raw data.""" if self._im_aspect_ratios is not None: return self._im_aspect_ratios self._im_aspect_ratios = [None] * len(self._items) for i, img_path in enumerate(self._items): with Image.open(img_path) as im: w, h = im.size self._im_aspect_ratios[i] = 1.0 * w / h return self._im_aspect_ratios def _parse_image_path(self, entry): """How to parse image dir and path from entry. Parameters: ---------- entry : dict COCO entry, e.g. including width, height, image path, etc.. Returns: ------- abs_path : str Absolute path for corresponding image. """ dirname, filename = entry["coco_url"].split("/")[-2:] abs_path = os.path.join(self._root, dirname, filename) return abs_path def __len__(self): return len(self._items) def __getitem__(self, idx): img_path = self._items[idx] label = self._labels[idx] # img = mx.image.imread(img_path, 1) img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR) label = np.array(label).copy() if self._transform is not None: img, label = self._transform(img, label) return img, label def _load_jsons(self): """ Load all image paths and labels from JSON annotation files into buffer. """ items = [] labels = [] im_aspect_ratios = [] from pycocotools.coco import COCO for split in self._splits: anno = os.path.join(self._root, self.annotation_dir, split) + ".json" _coco = COCO(anno) self._coco.append(_coco) classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())] if not classes == self.classes: raise ValueError("Incompatible category names with COCO: ") assert classes == self.classes json_id_to_contiguous = { v: k for k, v in enumerate(_coco.getCatIds())} if self.json_id_to_contiguous is None: self.json_id_to_contiguous = json_id_to_contiguous self.contiguous_id_to_json = { v: k for k, v in self.json_id_to_contiguous.items()} else: assert self.json_id_to_contiguous == json_id_to_contiguous # iterate through the annotations image_ids = sorted(_coco.getImgIds()) for entry in _coco.loadImgs(image_ids): abs_path = self._parse_image_path(entry) if not os.path.exists(abs_path): raise IOError("Image: {} not exists.".format(abs_path)) label = self._check_load_bbox(_coco, entry) if not label: continue im_aspect_ratios.append(float(entry["width"]) / entry["height"]) items.append(abs_path) labels.append(label) return items, labels, im_aspect_ratios def _check_load_bbox(self, coco, entry): """ Check and load ground-truth labels. 
""" entry_id = entry['id'] # fix pycocotools _isArrayLike which don't work for str in python3 entry_id = [entry_id] if not isinstance(entry_id, (list, tuple)) else entry_id ann_ids = coco.getAnnIds(imgIds=entry_id, iscrowd=None) objs = coco.loadAnns(ann_ids) # check valid bboxes valid_objs = [] width = entry["width"] height = entry["height"] for obj in objs: if obj["area"] < self._min_object_area: continue if obj.get("ignore", 0) == 1: continue if not self._use_crowd and obj.get("iscrowd", 0): continue # convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height) # require non-zero box area if obj["area"] > 0 and xmax > xmin and ymax > ymin: contiguous_cid = self.json_id_to_contiguous[obj["category_id"]] valid_objs.append([xmin, ymin, xmax, ymax, contiguous_cid]) if not valid_objs: if not self._skip_empty: # dummy invalid labels if no valid objects are found valid_objs.append([-1, -1, -1, -1, -1]) return valid_objs @staticmethod def bbox_clip_xyxy(xyxy, width, height): """ Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary. All bounding boxes will be clipped to the new region `(0, 0, width, height)`. Parameters: ---------- xyxy : list, tuple or numpy.ndarray The bbox in format (xmin, ymin, xmax, ymax). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. width : int or float Boundary width. height : int or float Boundary height. Returns: ------- tuple or np.array Description of returned object. """ if isinstance(xyxy, (tuple, list)): if not len(xyxy) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy))) x1 = np.minimum(width - 1, np.maximum(0, xyxy[0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[3])) return x1, y1, x2, y2 elif isinstance(xyxy, np.ndarray): if not xyxy.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape)) x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3])) return np.hstack((x1, y1, x2, y2)) else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy))) @staticmethod def bbox_xywh_to_xyxy(xywh): """ Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax) Parameters: ---------- xywh : list, tuple or numpy.ndarray The bbox in format (x, y, w, h). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. Returns: ------- tuple or np.ndarray The converted bboxes in format (xmin, ymin, xmax, ymax). If input is numpy.ndarray, return is numpy.ndarray correspondingly. 
""" if isinstance(xywh, (tuple, list)): if not len(xywh) == 4: raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh))) w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0) return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h elif isinstance(xywh, np.ndarray): if not xywh.size % 4 == 0: raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape)) xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1))) return xyxy else: raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh))) # --------------------------------------------------------------------------------------------------------------------- class CocoDetValTransform(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo self.image_size = self.ds_metainfo.input_image_size self._height = self.image_size[0] self._width = self.image_size[1] self._mean = np.array(ds_metainfo.mean_rgb, dtype=np.float32).reshape(1, 1, 3) self._std = np.array(ds_metainfo.std_rgb, dtype=np.float32).reshape(1, 1, 3) def __call__(self, src, label): # resize img, bbox = src, label input_h, input_w = self._height, self._width h, w, _ = src.shape s = max(h, w) * 1.0 c = np.array([w / 2., h / 2.], dtype=np.float32) trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h]) inp = cv2.warpAffine(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR) output_w = input_w output_h = input_h trans_output = self.get_affine_transform(c, s, 0, [output_w, output_h]) for i in range(bbox.shape[0]): bbox[i, :2] = self.affine_transform(bbox[i, :2], trans_output) bbox[i, 2:4] = self.affine_transform(bbox[i, 2:4], trans_output) bbox[:, :2] = np.clip(bbox[:, :2], 0, output_w - 1) bbox[:, 2:4] = np.clip(bbox[:, 2:4], 0, output_h - 1) img = inp # to tensor img = img.astype(np.float32) / 255.0 img = (img - self._mean) / self._std img = img.transpose(2, 0, 1).astype(np.float32) img = img return img, bbox.astype(img.dtype) @staticmethod def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0): """ Get affine transform matrix given center, scale and rotation. Parameters: ---------- center : tuple of float Center point. scale : float Scaling factor. rot : float Rotation degree. output_size : tuple of int (width, height) of the output size. shift : float Shift factor. inv : bool Whether inverse the computation. Returns: ------- numpy.ndarray Affine matrix. """ if not isinstance(scale, np.ndarray) and not isinstance(scale, list): scale = np.array([scale, scale], dtype=np.float32) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = np.pi * rot / 180 src_dir = CocoDetValTransform.get_rot_dir([0, src_w * -0.5], rot_rad) dst_dir = np.array([0, dst_w * -0.5], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[0, :] = center + scale_tmp * shift src[1, :] = center + src_dir + scale_tmp * shift dst[0, :] = [dst_w * 0.5, dst_h * 0.5] dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir src[2:, :] = CocoDetValTransform.get_3rd_point(src[0, :], src[1, :]) dst[2:, :] = CocoDetValTransform.get_3rd_point(dst[0, :], dst[1, :]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans @staticmethod def get_rot_dir(src_point, rot_rad): """ Get rotation direction. 
Parameters: ---------- src_point : tuple of float Original point. rot_rad : float Rotation radian. Returns: ------- tuple of float Rotation. """ sn, cs = np.sin(rot_rad), np.cos(rot_rad) src_result = [0, 0] src_result[0] = src_point[0] * cs - src_point[1] * sn src_result[1] = src_point[0] * sn + src_point[1] * cs return src_result @staticmethod def get_3rd_point(a, b): """ Get the 3rd point position given first two points. Parameters: ---------- a : tuple of float First point. b : tuple of float Second point. Returns: ------- tuple of float Third point. """ direct = a - b return b + np.array([-direct[1], direct[0]], dtype=np.float32) @staticmethod def affine_transform(pt, t): """ Apply affine transform to a bounding box given transform matrix t. Parameters: ---------- pt : numpy.ndarray Bounding box with shape (1, 2). t : numpy.ndarray Transformation matrix with shape (2, 3). Returns: ------- numpy.ndarray New bounding box with shape (1, 2). """ new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2] class Tuple(object): """ Wrap multiple batchify functions to form a function apply each input function on each input fields respectively. """ def __init__(self, fn, *args): if isinstance(fn, (list, tuple)): self._fn = fn else: self._fn = (fn,) + args def __call__(self, data): """ Batchify the input data. Parameters: ---------- data : list The samples to batchfy. Each sample should contain N attributes. Returns: ------- tuple A tuple of length N. Contains the batchified result of each attribute in the input. """ ret = [] for i, ele_fn in enumerate(self._fn): ret.append(ele_fn([ele[i] for ele in data])) return tuple(ret) class Stack(object): """ Stack the input data samples to construct the batch. """ def __call__(self, data): """ Batchify the input data. Parameters: ---------- data : list The input data samples Returns: ------- NDArray Result. """ return self._stack_arrs(data, True) @staticmethod def _stack_arrs(arrs, use_shared_mem=False): """ Internal imple for stacking arrays. """ if isinstance(arrs[0], mx.nd.NDArray): if use_shared_mem: out = mx.nd.empty((len(arrs),) + arrs[0].shape, dtype=arrs[0].dtype, ctx=mx.Context("cpu_shared", 0)) return mx.nd.stack(*arrs, out=out) else: return mx.nd.stack(*arrs) else: out = np.asarray(arrs) if use_shared_mem: return mx.nd.array(out, ctx=mx.Context("cpu_shared", 0)) else: return mx.nd.array(out) class Pad(object): """ Pad the input ndarrays along the specific padding axis and stack them to get the output. """ def __init__(self, axis=0, pad_val=0, num_shards=1, ret_length=False): self._axis = axis self._pad_val = pad_val self._num_shards = num_shards self._ret_length = ret_length def __call__(self, data): """ Batchify the input data. Parameters: ---------- data : list A list of N samples. Each sample can be 1) ndarray or 2) a list/tuple of ndarrays Returns: ------- NDArray Data in the minibatch. Shape is (N, ...) NDArray, optional The sequences' original lengths at the padded axis. Shape is (N,). This will only be returned in `ret_length` is True. """ if isinstance(data[0], (mx.nd.NDArray, np.ndarray, list)): padded_arr, original_length = self._pad_arrs_to_max_length( data, self._axis, self._pad_val, self._num_shards, True) if self._ret_length: return padded_arr, original_length else: return padded_arr else: raise NotImplementedError @staticmethod def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, num_shards=1, use_shared_mem=False): """ Inner Implementation of the Pad batchify. 
""" if not isinstance(arrs[0], (mx.nd.NDArray, np.ndarray)): arrs = [np.asarray(ele) for ele in arrs] if isinstance(pad_axis, tuple): original_length = [] for axis in pad_axis: original_length.append(np.array([ele.shape[axis] for ele in arrs])) original_length = np.stack(original_length).T else: original_length = np.array([ele.shape[pad_axis] for ele in arrs]) pad_axis = [pad_axis] if len(original_length) % num_shards != 0: logging.warning( 'Batch size cannot be evenly split. Trying to shard %d items into %d shards', len(original_length), num_shards) original_length = np.array_split(original_length, num_shards) max_lengths = [np.max(ll, axis=0, keepdims=len(pad_axis) == 1) for ll in original_length] # add batch dimension ret_shape = [[ll.shape[0], ] + list(arrs[0].shape) for ll in original_length] for i, shape in enumerate(ret_shape): for j, axis in enumerate(pad_axis): shape[1 + axis] = max_lengths[i][j] if use_shared_mem: ret = [mx.nd.full(shape=tuple(shape), val=pad_val, ctx=mx.Context('cpu_shared', 0), dtype=arrs[0].dtype) for shape in ret_shape] original_length = [mx.nd.array(ll, ctx=mx.Context('cpu_shared', 0), dtype=np.int32) for ll in original_length] else: ret = [mx.nd.full(shape=tuple(shape), val=pad_val, dtype=arrs[0].dtype) for shape in ret_shape] original_length = [mx.nd.array(ll, dtype=np.int32) for ll in original_length] for i, arr in enumerate(arrs): if ret[i // ret[0].shape[0]].shape[1:] == arr.shape: ret[i // ret[0].shape[0]][i % ret[0].shape[0]] = arr else: slices = [slice(0, ll) for ll in arr.shape] ret[i // ret[0].shape[0]][i % ret[0].shape[0]][tuple(slices)] = arr if len(ret) == len(original_length) == 1: return ret[0], original_length[0] return ret, original_length def get_post_transform(orig_w, orig_h, out_w, out_h): """Get the post prediction affine transforms. This will be used to adjust the prediction results according to original coco image resolutions. Parameters: ---------- orig_w : int Original width of the image. orig_h : int Original height of the image. out_w : int Width of the output image after prediction. out_h : int Height of the output image after prediction. Returns: ------- numpy.ndarray Affine transform matrix 3x2. 
""" s = max(orig_w, orig_h) * 1.0 c = np.array([orig_w / 2., orig_h / 2.], dtype=np.float32) trans_output = CocoDetValTransform.get_affine_transform(c, s, 0, [out_w, out_h], inv=True) return trans_output class CocoDetMetaInfo(DatasetMetaInfo): def __init__(self): super(CocoDetMetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = CocoDetDataset self.num_training_samples = None self.in_channels = 3 self.num_classes = CocoDetDataset.classes self.input_image_size = (512, 512) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.mAP"] self.test_metric_names = ["CocoDetMApMetric"] self.test_metric_extra_kwargs = [ {"name": "mAP", "img_height": 512, "coco_annotations_file_path": None, "contiguous_id_to_json": None, "data_shape": None, "post_affine": get_post_transform}] self.test_dataset_extra_kwargs =\ {"skip_empty": False} self.saver_acc_ind = 0 self.do_transform = True self.do_transform_first = False self.last_batch = "keep" self.batchify_fn = Tuple(Stack(), Pad(pad_val=-1)) self.val_transform = CocoDetValTransform self.test_transform = CocoDetValTransform self.ml_type = "hpe" self.allow_hybridize = False self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for ImageNet-1K dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(CocoDetMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") def update(self, args): """ Update ImageNet-1K dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CocoDetMetaInfo, self).update(args) self.input_image_size = args.input_size self.test_metric_extra_kwargs[0]["img_height"] = self.input_image_size[0] self.test_metric_extra_kwargs[0]["data_shape"] = self.input_image_size def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- args : obj A dataset class instance. """ self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path self.test_metric_extra_kwargs[0]["contiguous_id_to_json"] = dataset.contiguous_id_to_json
27,185
35.688259
119
py
imgclsmob
imgclsmob-master/pytorch/datasets/seg_dataset.py
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
import torch.utils.data as data


class SegDataset(data.Dataset):
    """
    Segmentation base dataset.

    Parameters:
    ----------
    root : str
        Path to the folder storing the dataset.
    mode : str
        'train', 'val', 'test', or 'demo'.
    transform : func
        A function that takes data and transforms it.
    base_size : int, default 520
        Base image size.
    crop_size : int, default 480
        Crop image size.
    """
    def __init__(self,
                 root,
                 mode,
                 transform,
                 base_size=520,
                 crop_size=480):
        assert (mode in ("train", "val", "test", "demo"))
        self.root = root
        self.mode = mode
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size

    def _val_sync_transform(self, image, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = image.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = image.size
        x1 = int(round(0.5 * (w - outsize)))
        y1 = int(round(0.5 * (h - outsize)))
        image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    def _sync_transform(self, image, mask):
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    @staticmethod
    def _img_transform(image):
        return np.array(image)

    @staticmethod
    def _mask_transform(mask):
        return np.array(mask).astype(np.int32)
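# ---------------------------------------------------------------------------------------------------------------------
# A minimal sketch of how a concrete dataset might build on SegDataset (the subclass and its sample list
# below are hypothetical, not part of this repo). Subclasses load an image/mask pair and dispatch to
# `_sync_transform` for training or `_val_sync_transform` for validation.
class _DemoSegDataset(SegDataset):
    def __init__(self, root, mode="train", transform=None):
        super(_DemoSegDataset, self).__init__(root, mode, transform)
        self.samples = []  # would hold (image_path, mask_path) pairs

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        image_path, mask_path = self.samples[index]
        image = Image.open(image_path).convert("RGB")
        mask = Image.open(mask_path)
        if self.mode == "train":
            image, mask = self._sync_transform(image, mask)
        else:
            image, mask = self._val_sync_transform(image, mask)
        if self.transform is not None:
            image = self.transform(image)
        return image, mask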
3,366
33.010101
89
py
imgclsmob
imgclsmob-master/pytorch/datasets/coco_hpe2_dataset.py
""" COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose). """ import os import json import math import cv2 from operator import itemgetter import numpy as np import torch import torch.utils.data as data from .dataset_metainfo import DatasetMetaInfo class CocoHpe2Dataset(data.Dataset): """ COCO keypoint detection (2D multiple human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None): super(CocoHpe2Dataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self.transform = transform mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") with open(annotations_file_path, "r") as f: self.file_names = json.load(f)["images"] self.image_dir_path = os.path.join(root, mode_name + "2017") self.annotations_file_path = annotations_file_path def __str__(self): return self.__class__.__name__ + "(" + self._root + ")" def __len__(self): return len(self.file_names) def __getitem__(self, idx): file_name = self.file_names[idx]["file_name"] image_file_path = os.path.join(self.image_dir_path, file_name) image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR) # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) img_mean = (128, 128, 128) img_scale = 1.0 / 256 base_height = 368 stride = 8 pad_value = (0, 0, 0) height, width, _ = image.shape image = self.normalize(image, img_mean, img_scale) ratio = base_height / float(image.shape[0]) image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC) min_dims = [base_height, max(image.shape[1], base_height)] image, pad = self.pad_width( image, stride, pad_value, min_dims) image = image.astype(np.float32) image = image.transpose((2, 0, 1)) image = torch.from_numpy(image) # if self.transform is not None: # image = self.transform(image) image_id = int(os.path.splitext(os.path.basename(file_name))[0]) label = np.array([image_id, 1.0] + pad + [height, width], np.float32) label = torch.from_numpy(label) return image, label @staticmethod def normalize(img, img_mean, img_scale): img = np.array(img, dtype=np.float32) img = (img - img_mean) * img_scale return img @staticmethod def pad_width(img, stride, pad_value, min_dims): h, w, _ = img.shape h = min(min_dims[0], h) min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride min_dims[1] = max(min_dims[1], w) min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride top = int(math.floor((min_dims[0] - h) / 2.0)) left = int(math.floor((min_dims[1] - w) / 2.0)) bottom = int(min_dims[0] - h - top) right = int(min_dims[1] - w - left) pad = [top, left, bottom, right] padded_img = cv2.copyMakeBorder( src=img, top=top, bottom=bottom, left=left, right=right, borderType=cv2.BORDER_CONSTANT, value=pad_value) return padded_img, pad # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2ValTransform(object): def __init__(self, ds_metainfo): self.ds_metainfo = ds_metainfo def __call__(self, src, label): return src, label def extract_keypoints(heatmap, all_keypoints, total_keypoint_num): heatmap[heatmap < 0.1] = 0 heatmap_with_borders = np.pad(heatmap, 
[(2, 2), (2, 2)], mode="constant") heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 1:heatmap_with_borders.shape[1] - 1] heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 2:heatmap_with_borders.shape[1]] heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 0:heatmap_with_borders.shape[1] - 2] heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1] - 1] heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0] - 2, 1:heatmap_with_borders.shape[1] - 1] heatmap_peaks = (heatmap_center > heatmap_left) &\ (heatmap_center > heatmap_right) &\ (heatmap_center > heatmap_up) &\ (heatmap_center > heatmap_down) heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0] - 1, 1:heatmap_center.shape[1] - 1] keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h) keypoints = sorted(keypoints, key=itemgetter(0)) suppressed = np.zeros(len(keypoints), np.uint8) keypoints_with_score_and_id = [] keypoint_num = 0 for i in range(len(keypoints)): if suppressed[i]: continue for j in range(i + 1, len(keypoints)): if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 + (keypoints[i][1] - keypoints[j][1]) ** 2) < 6: suppressed[j] = 1 keypoint_with_score_and_id = ( keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]], total_keypoint_num + keypoint_num) keypoints_with_score_and_id.append(keypoint_with_score_and_id) keypoint_num += 1 all_keypoints.append(keypoints_with_score_and_id) return keypoint_num def group_keypoints(all_keypoints_by_type, pafs, pose_entry_size=20, min_paf_score=0.05): def linspace2d(start, stop, n=10): points = 1 / (n - 1) * (stop - start) return points[:, None] * np.arange(n) + start[:, None] BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]] BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]) pose_entries = [] all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist]) for part_id in range(len(BODY_PARTS_PAF_IDS)): part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]] kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]] kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]] num_kpts_a = len(kpts_a) num_kpts_b = len(kpts_b) kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part continue elif num_kpts_a == 0: # body part has just 'b' keypoints for i in range(num_kpts_b): num = 0 for j in range(len(pose_entries)): # check if already in some pose, was added by another body part if pose_entries[j][kpt_b_id] == kpts_b[i][3]: num += 1 continue if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx pose_entry[-1] = 1 # num keypoints in pose pose_entry[-2] = kpts_b[i][2] # pose score pose_entries.append(pose_entry) continue elif num_kpts_b == 0: # body part has just 'a' keypoints for i in range(num_kpts_a): num = 0 for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == kpts_a[i][3]: num += 1 continue if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_a_id] = kpts_a[i][3] pose_entry[-1] = 1 pose_entry[-2] = kpts_a[i][2] 
pose_entries.append(pose_entry) continue connections = [] for i in range(num_kpts_a): kpt_a = np.array(kpts_a[i][0:2]) for j in range(num_kpts_b): kpt_b = np.array(kpts_b[j][0:2]) mid_point = [(), ()] mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)), int(round((kpt_a[1] + kpt_b[1]) * 0.5))) mid_point[1] = mid_point[0] vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]] vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2) if vec_norm == 0: continue vec[0] /= vec_norm vec[1] /= vec_norm cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] + vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1]) height_n = pafs.shape[0] // 2 success_ratio = 0 point_num = 10 # number of points to integration over paf if cur_point_score > -100: passed_point_score = 0 passed_point_num = 0 x, y = linspace2d(kpt_a, kpt_b) for point_idx in range(point_num): px = int(round(x[point_idx])) py = int(round(y[point_idx])) paf = part_pafs[py, px, 0:2] cur_point_score = vec[0] * paf[0] + vec[1] * paf[1] if cur_point_score > min_paf_score: passed_point_score += cur_point_score passed_point_num += 1 success_ratio = passed_point_num / point_num ratio = 0 if passed_point_num > 0: ratio = passed_point_score / passed_point_num ratio += min(height_n / vec_norm - 1, 0) if ratio > 0 and success_ratio > 0.8: score_all = ratio + kpts_a[i][2] + kpts_b[j][2] connections.append([i, j, ratio, score_all]) if len(connections) > 0: connections = sorted(connections, key=itemgetter(2), reverse=True) num_connections = min(num_kpts_a, num_kpts_b) has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32) has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32) filtered_connections = [] for row in range(len(connections)): if len(filtered_connections) == num_connections: break i, j, cur_point_score = connections[row][0:3] if not has_kpt_a[i] and not has_kpt_b[j]: filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score]) has_kpt_a[i] = 1 has_kpt_b[j] = 1 connections = filtered_connections if len(connections) == 0: continue if part_id == 0: pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))] for i in range(len(connections)): pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0] pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1] pose_entries[i][-1] = 2 pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2] elif part_id == 17 or part_id == 18: kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] for i in range(len(connections)): for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1: pose_entries[j][kpt_b_id] = connections[i][1] elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1: pose_entries[j][kpt_a_id] = connections[i][0] continue else: kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] for i in range(len(connections)): num = 0 for j in range(len(pose_entries)): if pose_entries[j][kpt_a_id] == connections[i][0]: pose_entries[j][kpt_b_id] = connections[i][1] num += 1 pose_entries[j][-1] += 1 pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2] if num == 0: pose_entry = np.ones(pose_entry_size) * -1 pose_entry[kpt_a_id] = connections[i][0] pose_entry[kpt_b_id] = connections[i][1] pose_entry[-1] = 2 pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2] pose_entries.append(pose_entry) filtered_entries = [] for i in 
range(len(pose_entries)): if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2): continue filtered_entries.append(pose_entries[i]) pose_entries = np.asarray(filtered_entries) return pose_entries, all_keypoints def convert_to_coco_format(pose_entries, all_keypoints): coco_keypoints = [] scores = [] for n in range(len(pose_entries)): if len(pose_entries[n]) == 0: continue keypoints = [0] * 17 * 3 to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3] person_score = pose_entries[n][-2] position_id = -1 for keypoint_id in pose_entries[n][:-2]: position_id += 1 if position_id == 1: # no 'neck' in COCO continue cx, cy, score, visibility = 0, 0, 0, 0 # keypoint not found if keypoint_id != -1: cx, cy, score = all_keypoints[int(keypoint_id), 0:3] cx = cx + 0.5 cy = cy + 0.5 visibility = 1 keypoints[to_coco_map[position_id] * 3 + 0] = cx keypoints[to_coco_map[position_id] * 3 + 1] = cy keypoints[to_coco_map[position_id] * 3 + 2] = visibility coco_keypoints.append(keypoints) scores.append(person_score * max(0, (pose_entries[n][-1] - 1))) # -1 for 'neck' return coco_keypoints, scores def recalc_pose(pred, label): label_img_id = label[:, 0].astype(np.int32) # label_score = label[:, 1] pads = label[:, 2:6].astype(np.int32) heights = label[:, 6].astype(np.int32) widths = label[:, 7].astype(np.int32) keypoints = 19 stride = 8 heatmap2ds = pred[:, :keypoints] paf2ds = pred[:, keypoints:(3 * keypoints)] pred_pts_score = [] pred_person_score = [] label_img_id_ = [] batch = pred.shape[0] for batch_i in range(batch): label_img_id_i = label_img_id[batch_i] pad = list(pads[batch_i]) height = int(heights[batch_i]) width = int(widths[batch_i]) heatmap2d = heatmap2ds[batch_i] paf2d = paf2ds[batch_i] heatmaps = np.transpose(heatmap2d, (1, 2, 0)) heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :] heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC) pafs = np.transpose(paf2d, (1, 2, 0)) pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :] pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC) total_keypoints_num = 0 all_keypoints_by_type = [] for kpt_idx in range(18): # 19th for bg total_keypoints_num += extract_keypoints( heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num) pose_entries, all_keypoints = group_keypoints( all_keypoints_by_type, pafs) coco_keypoints, scores = convert_to_coco_format( pose_entries, all_keypoints) pred_pts_score.append(coco_keypoints) pred_person_score.append(scores) label_img_id_.append([label_img_id_i] * len(scores)) return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0]) # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe2MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = CocoHpe2Dataset self.num_training_samples = None self.in_channels = 3 self.num_classes = 17 self.input_image_size = (368, 368) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts 
= ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.val_transform = CocoHpe2ValTransform
        self.test_transform = CocoHpe2ValTransform
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        self.load_ignore_extra = False

    def add_dataset_parser_arguments(self, parser, work_dir_path):
        """
        Create python script parameters (for COCO dataset metainfo).

        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--load-ignore-extra",
            action="store_true",
            help="ignore extra layers in the source PyTorch model")

    def update(self, args):
        """
        Update COCO dataset metainfo after user customizing.

        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe2MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.load_ignore_extra = args.load_ignore_extra

    def update_from_dataset(self, dataset):
        """
        Update dataset metainfo after a dataset class instance creation.

        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
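# ---------------------------------------------------------------------------------------------------------------------
# A minimal sketch exercising `extract_keypoints` above on a synthetic heatmap (peak positions and scores
# are hypothetical). Responses below 0.1 are zeroed, 4-neighborhood local maxima become (x, y, score, id)
# tuples, and peaks closer than 6 px to a stronger one are suppressed. Note the function mutates its input,
# hence the copy.
if __name__ == "__main__":
    demo_heatmap = np.zeros((32, 32), dtype=np.float32)
    demo_heatmap[10, 12] = 0.9   # a strong peak at (x=12, y=10)
    demo_heatmap[20, 25] = 0.5   # a second, weaker peak
    demo_heatmap[10, 14] = 0.4   # within 6 px of the strong peak -> suppressed

    demo_keypoints = []
    demo_num = extract_keypoints(demo_heatmap.copy(), demo_keypoints, total_keypoint_num=0)
    print(demo_num)        # 2 keypoints survive
    print(demo_keypoints)  # [[(12, 10, ~0.9, 0), (25, 20, ~0.5, 1)]]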
20,780
39.747059
119
py
imgclsmob
imgclsmob-master/pytorch/datasets/svhn_cls_dataset.py
""" SVHN classification dataset. """ import os from torchvision.datasets import SVHN from .cifar10_cls_dataset import CIFAR10MetaInfo class SVHNFine(SVHN): """ SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/. Each sample is an image (in 3D NDArray) with shape (32, 32, 3). Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, we assign the label `0` to the digit `0`. Parameters: ---------- root : str, default '~/.torch/datasets/svhn' Path to temp folder for storing data. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and label and transforms them. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "svhn"), mode="train", transform=None): super(SVHNFine, self).__init__( root=root, split=("train" if mode == "train" else "test"), transform=transform, download=True) class SVHNMetaInfo(CIFAR10MetaInfo): def __init__(self): super(SVHNMetaInfo, self).__init__() self.label = "SVHN" self.root_dir_name = "svhn" self.dataset_class = SVHNFine self.num_training_samples = 73257
1,364
30.022727
93
py
imgclsmob
imgclsmob-master/pytorch/datasets/coco_hpe3_dataset.py
""" COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose). """ import os # import json import math import cv2 import numpy as np import torch from torch.nn import functional as F import torch.utils.data as data from .dataset_metainfo import DatasetMetaInfo class CocoHpe3Dataset(data.Dataset): """ COCO keypoint detection (2D multiple human pose estimation) dataset. Parameters: ---------- root : string Path to `annotations`, `train2017`, and `val2017` folders. mode : string, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None): super(CocoHpe3Dataset, self).__init__() self._root = os.path.expanduser(root) self.mode = mode self.transform = transform mode_name = "train" if mode == "train" else "val" annotations_dir_path = os.path.join(root, "annotations") annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json") # with open(annotations_file_path, "r") as f: # self.file_names = json.load(f)["images"] self.image_dir_path = os.path.join(root, mode_name + "2017") self.annotations_file_path = annotations_file_path from pycocotools.coco import COCO self.coco_gt = COCO(self.annotations_file_path) self.validation_ids = self.coco_gt.getImgIds()[:] def __str__(self): return self.__class__.__name__ + "(" + self._root + ")" def __len__(self): return len(self.validation_ids) def __getitem__(self, idx): # file_name = self.file_names[idx]["file_name"] image_id = self.validation_ids[idx] file_name = self.coco_gt.imgs[image_id]["file_name"] image_file_path = os.path.join(self.image_dir_path, file_name) image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR) # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB) image_src_shape = image.shape[:2] boxsize = 512 max_downsample = 64 pad_value = 128 scale = boxsize / image.shape[0] if scale * image.shape[0] > 2600 or scale * image.shape[1] > 3800: scale = min(2600 / image.shape[0], 3800 / image.shape[1]) image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) image, pad = self.pad_right_down_corner(image, max_downsample, pad_value) image = np.float32(image / 255) image = image.transpose((2, 0, 1)) image = torch.from_numpy(image) # image_id = int(os.path.splitext(os.path.basename(file_name))[0]) label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32) label = torch.from_numpy(label) return image, label @staticmethod def pad_right_down_corner(img, stride, pad_value): h = img.shape[0] w = img.shape[1] pad = 4 * [None] pad[0] = 0 # up pad[1] = 0 # left pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right img_padded = img pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1)) img_padded = np.concatenate((pad_up, img_padded), axis=0) pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1)) img_padded = np.concatenate((pad_left, img_padded), axis=1) pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1)) img_padded = np.concatenate((img_padded, pad_down), axis=0) pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1)) img_padded = np.concatenate((img_padded, pad_right), axis=1) return img_padded, pad # --------------------------------------------------------------------------------------------------------------------- class CocoHpe2ValTransform(object): def __init__(self, 
ds_metainfo): self.ds_metainfo = ds_metainfo def __call__(self, src, label): return src, label def recalc_pose(pred, label): dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15, 14: 2, 15: 1, 16: 4, 17: 3} parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye", "Rear", "Lear"] num_parts = len(parts) parts_dict = dict(zip(parts, range(num_parts))) limb_from = ['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck', 'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho', 'Lhip', 'Rear', 'Lear', 'Rhip'] limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho', 'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip', 'Rkne', 'Rsho', 'Lsho', 'Lhip'] limb_from = [parts_dict[n] for n in limb_from] limb_to = [parts_dict[n] for n in limb_to] assert limb_from == [x for x in [ 1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]] assert limb_to == [x for x in [ 0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]] limbs_conn = list(zip(limb_from, limb_to)) limb_seq = limbs_conn paf_layers = 30 num_layers = 50 stride = 4 label_img_id = label[:, 0].astype(np.int32) # label_score = label[:, 1] pads = label[:, 2:6].astype(np.int32) image_src_shapes = label[:, 6:8].astype(np.int32) pred_pts_score = [] pred_person_score = [] label_img_id_ = [] batch = pred.shape[0] for batch_i in range(batch): label_img_id_i = label_img_id[batch_i] pad = list(pads[batch_i]) image_src_shape = list(image_src_shapes[batch_i]) output_blob = pred[batch_i].transpose((1, 2, 0)) output_paf = output_blob[:, :, :paf_layers] output_heatmap = output_blob[:, :, paf_layers:num_layers] heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) heatmap = heatmap[ pad[0]:(output_blob.shape[0] * stride - pad[2]), pad[1]:(output_blob.shape[1] * stride - pad[3]), :] heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC) paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) paf = paf[ pad[0]:(output_blob.shape[0] * stride - pad[2]), pad[1]:(output_blob.shape[1] * stride - pad[3]), :] paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC) all_peaks = find_peaks(heatmap) connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq) subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq) for s in subset[..., 0]: keypoint_indexes = s[:18] person_keypoint_coordinates = [] for index in keypoint_indexes: if index == -1: X, Y, C = 0, 0, 0 else: X, Y, C = list(candidate[index.astype(int)][:2]) + [1] person_keypoint_coordinates.append([X, Y, C]) person_keypoint_coordinates_coco = [None] * 17 for dt_index, gt_index in dt_gt_mapping.items(): if gt_index is None: continue person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index] pred_pts_score.append(person_keypoint_coordinates_coco) pred_person_score.append(1 - 1.0 / s[18]) label_img_id_.append(label_img_id_i) return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_) def 
find_peaks(heatmap_avg): thre1 = 0.1 offset_radius = 2 all_peaks = [] peak_counter = 0 heatmap_avg = heatmap_avg.astype(np.float32) filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...] filter_map = torch.from_numpy(filter_map).cuda() filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1) filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0)) for part in range(18): map_ori = heatmap_avg[:, :, part] peaks_binary = filter_map[:, :, part] peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks] id = range(peak_counter, peak_counter + len(refined_peaks_with_score)) peaks_with_score_and_id = [refined_peaks_with_score[i] + (id[i],) for i in range(len(id))] all_peaks.append(peaks_with_score_and_id) peak_counter += len(peaks) return all_peaks def keypoint_heatmap_nms(heat, kernel=3, thre=0.1): # keypoint NMS on heatmap (score map) pad = (kernel - 1) // 2 pad_heat = F.pad(heat, (pad, pad, pad, pad), mode="reflect") hmax = F.max_pool2d(pad_heat, (kernel, kernel), stride=1, padding=0) keep = (hmax == heat).float() * (heat >= thre).float() return heat * keep def refine_centroid(scorefmp, anchor, radius): """ Refine the centroid coordinate. It dose not affect the results after testing. :param scorefmp: 2-D numpy array, original regressed score map :param anchor: python tuple, (x,y) coordinates :param radius: int, range of considered scores :return: refined anchor, refined score """ x_c, y_c = anchor x_min = x_c - radius x_max = x_c + radius + 1 y_min = y_c - radius y_max = y_c + radius + 1 if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0: return anchor + (scorefmp[y_c, x_c], ) score_box = scorefmp[y_min:y_max, x_min:x_max] x_grid, y_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1] offset_x = (score_box * x_grid).sum() / score_box.sum() offset_y = (score_box * y_grid).sum() / score_box.sum() x_refine = x_c + offset_x y_refine = y_c + offset_y refined_anchor = (x_refine, y_refine) return refined_anchor + (score_box.mean(),) def find_connections(all_peaks, paf_avg, image_width, limb_seq): mid_num_ = 20 thre2 = 0.1 connect_ration = 0.8 connection_all = [] special_k = [] for k in range(len(limb_seq)): score_mid = paf_avg[:, :, k] candA = all_peaks[limb_seq[k][0]] candB = all_peaks[limb_seq[k][1]] nA = len(candA) nB = len(candB) if nA != 0 and nB != 0: connection_candidate = [] for i in range(nA): for j in range(nB): vec = np.subtract(candB[j][:2], candA[i][:2]) norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) mid_num = min(int(round(norm + 1)), mid_num_) if norm == 0: continue startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), np.linspace(candA[i][1], candB[j][1], num=mid_num))) limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for I in range(len(startend))]) score_midpts = limb_response score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0) criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts) criterion2 = score_with_dist_prior > 0 if criterion1 and criterion2: connection_candidate.append([ i, j, score_with_dist_prior, norm, 0.5 * score_with_dist_prior + 0.25 * candA[i][2] + 0.25 * candB[j][2]]) connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True) connection = np.zeros((0, 6)) for c in 
range(len(connection_candidate)): i, j, s, limb_len = connection_candidate[c][0:4] if i not in connection[:, 3] and j not in connection[:, 4]: connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]]) if len(connection) >= min(nA, nB): break connection_all.append(connection) else: special_k.append(k) connection_all.append([]) return connection_all, special_k def find_people(connection_all, special_k, all_peaks, limb_seq): len_rate = 16.0 connection_tole = 0.7 remove_recon = 0 subset = -1 * np.ones((0, 20, 2)) candidate = np.array([item for sublist in all_peaks for item in sublist]) for k in range(len(limb_seq)): if k not in special_k: partAs = connection_all[k][:, 0] partBs = connection_all[k][:, 1] indexA, indexB = np.array(limb_seq[k]) for i in range(len(connection_all[k])): found = 0 subset_idx = [-1, -1] for j in range(len(subset)): if subset[j][indexA][0].astype(int) == (partAs[i]).astype(int) or subset[j][indexB][0].astype( int) == partBs[i].astype(int): if found >= 2: continue subset_idx[found] = j found += 1 if found == 1: j = subset_idx[0] if subset[j][indexB][0].astype(int) == -1 and\ len_rate * subset[j][-1][1] > connection_all[k][i][-1]: subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-1][0] += 1 subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) elif subset[j][indexB][0].astype(int) != partBs[i].astype(int): if subset[j][indexB][1] >= connection_all[k][i][2]: pass else: if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]: continue subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1] subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\ subset[j][indexB][1] <= connection_all[k][i][2]: subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1] subset[j][indexB][0] = partBs[i] subset[j][indexB][1] = connection_all[k][i][2] subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1]) else: pass elif found == 2: j1, j2 = subset_idx membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2] membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2] membership = membership1 + membership2 if len(np.nonzero(membership == 2)[0]) == 0: min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1]) min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1]) min_tolerance = min(min_limb1, min_limb2) if connection_all[k][i][2] < connection_tole * min_tolerance or\ len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]: continue subset[j1][:-2][...] += (subset[j2][:-2][...] 
+ 1) subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0] subset[j1][-2][0] += connection_all[k][i][2] subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1]) subset = np.delete(subset, j2, 0) else: if connection_all[k][i][0] in subset[j1, :-2, 0]: c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0]) c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1]) else: c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1]) c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0]) c1 = int(c1[0]) c2 = int(c2[0]) assert c1 != c2, "a candidate keypoint is used twice, shared by two people" if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]: continue small_j = j1 remove_c = c1 if subset[j1][c1][1] > subset[j2][c2][1]: small_j = j2 remove_c = c2 if remove_recon > 0: subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \ subset[small_j][remove_c][1] subset[small_j][remove_c][0] = -1 subset[small_j][remove_c][1] = -1 subset[small_j][-1][0] -= 1 elif not found and k < len(limb_seq): row = -1 * np.ones((20, 2)) row[indexA][0] = partAs[i] row[indexA][1] = connection_all[k][i][2] row[indexB][0] = partBs[i] row[indexB][1] = connection_all[k][i][2] row[-1][0] = 2 row[-1][1] = connection_all[k][i][-1] row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] row = row[np.newaxis, :, :] subset = np.concatenate((subset, row), axis=0) deleteIdx = [] for i in range(len(subset)): if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45: deleteIdx.append(i) subset = np.delete(subset, deleteIdx, axis=0) return subset, candidate # --------------------------------------------------------------------------------------------------------------------- class CocoHpe3MetaInfo(DatasetMetaInfo): def __init__(self): super(CocoHpe3MetaInfo, self).__init__() self.label = "COCO" self.short_label = "coco" self.root_dir_name = "coco" self.dataset_class = CocoHpe3Dataset self.num_training_samples = None self.in_channels = 3 self.num_classes = 17 self.input_image_size = (256, 256) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_capts = ["Val.CocoOksAp"] self.test_metric_names = ["CocoHpeOksApMetric"] self.test_metric_extra_kwargs = [ {"name": "OksAp", "coco_annotations_file_path": None, "validation_ids": None, "use_file": False, "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}] self.saver_acc_ind = 0 self.do_transform = True self.val_transform = CocoHpe2ValTransform self.test_transform = CocoHpe2ValTransform self.ml_type = "hpe" self.net_extra_kwargs = {} self.mean_rgb = (0.485, 0.456, 0.406) self.std_rgb = (0.229, 0.224, 0.225) self.load_ignore_extra = False def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for COCO dataset metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--input-size", type=int, nargs=2, default=self.input_image_size, help="size of the input for model") parser.add_argument( "--load-ignore-extra", action="store_true", help="ignore extra layers in the source PyTorch model") def update(self, args): """ Update COCO dataset metainfo after user customizing.
Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CocoHpe3MetaInfo, self).update(args) self.input_image_size = args.input_size self.load_ignore_extra = args.load_ignore_extra def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- dataset : obj A dataset class instance. """ self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path # self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids
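A minimal sketch (not from the repo's test suite) of the max-pool keypoint NMS that `find_peaks` relies on, run on a synthetic heatmap; it shows why the subsequent peak listing is cheap: only pixels that equal their window maximum and clear the threshold survive.

import torch
import torch.nn.functional as F

def heatmap_nms(heat, kernel=3, thre=0.1):
    # Same scheme as keypoint_heatmap_nms above: reflect-pad, max-pool with
    # stride 1, then keep only pixels that are their own window maximum and
    # exceed the score threshold.
    pad = (kernel - 1) // 2
    pad_heat = F.pad(heat, (pad, pad, pad, pad), mode="reflect")
    hmax = F.max_pool2d(pad_heat, (kernel, kernel), stride=1, padding=0)
    return heat * ((hmax == heat).float() * (heat >= thre).float())

heat = torch.zeros(1, 1, 7, 7)
heat[0, 0, 3, 3] = 0.9   # a true local maximum
heat[0, 0, 3, 4] = 0.5   # suppressed: a stronger pixel sits in its window
heat[0, 0, 0, 0] = 0.05  # suppressed: below the threshold
out = heatmap_nms(heat)
assert out[0, 0, 3, 3] > 0
assert out[0, 0, 3, 4] == 0 and out[0, 0, 0, 0] == 0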
23,180
40.101064
120
py
imgclsmob
imgclsmob-master/pytorch/datasets/asr_dataset.py
""" Automatic Speech Recognition (ASR) abstract dataset. """ __all__ = ['AsrDataset', 'asr_test_transform'] import torch.utils.data as data import torchvision.transforms as transforms from pytorch.pytorchcv.models.jasper import NemoAudioReader class AsrDataset(data.Dataset): """ Automatic Speech Recognition (ASR) abstract dataset. Parameters: ---------- root : str Path to the folder stored the dataset. mode : str 'train', 'val', 'test', or 'demo'. transform : func A function that takes data and transforms it. """ def __init__(self, root, mode, transform): super(AsrDataset, self).__init__() assert (mode in ("train", "val", "test", "demo")) self.root = root self.mode = mode self.transform = transform self.data = [] self.audio_reader = NemoAudioReader() def __getitem__(self, index): wav_file_path, label_text = self.data[index] audio_data = self.audio_reader.read_from_file(wav_file_path) audio_len = audio_data.shape[0] return (audio_data, audio_len), label_text def __len__(self): return len(self.data) def asr_test_transform(ds_metainfo): assert (ds_metainfo is not None) return transforms.Compose([ transforms.ToTensor(), ])
1,385
25.653846
68
py
imgclsmob
imgclsmob-master/pytorch/datasets/cifar10_cls_dataset.py
""" CIFAR-10 classification dataset. """ import os from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms from .dataset_metainfo import DatasetMetaInfo class CIFAR10Fine(CIFAR10): """ CIFAR-10 image classification dataset. Parameters: ---------- root : str, default '~/.torch/datasets/cifar10' Path to temp folder for storing data. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and label and transforms them. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "cifar10"), mode="train", transform=None): super(CIFAR10Fine, self).__init__( root=root, train=(mode == "train"), transform=transform, download=True) class CIFAR10MetaInfo(DatasetMetaInfo): def __init__(self): super(CIFAR10MetaInfo, self).__init__() self.label = "CIFAR10" self.short_label = "cifar" self.root_dir_name = "cifar10" self.dataset_class = CIFAR10Fine self.num_training_samples = 50000 self.in_channels = 3 self.num_classes = 10 self.input_image_size = (32, 32) self.train_metric_capts = ["Train.Err"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err"}] self.val_metric_capts = ["Val.Err"] self.val_metric_names = ["Top1Error"] self.val_metric_extra_kwargs = [{"name": "err"}] self.saver_acc_ind = 0 self.train_transform = cifar10_train_transform self.val_transform = cifar10_val_transform self.test_transform = cifar10_val_transform self.ml_type = "imgcls" def cifar10_train_transform(ds_metainfo, mean_rgb=(0.4914, 0.4822, 0.4465), std_rgb=(0.2023, 0.1994, 0.2010), jitter_param=0.4): assert (ds_metainfo is not None) assert (ds_metainfo.input_image_size[0] == 32) return transforms.Compose([ transforms.RandomCrop( size=32, padding=4), transforms.RandomHorizontalFlip(), transforms.ColorJitter( brightness=jitter_param, contrast=jitter_param, saturation=jitter_param), transforms.ToTensor(), transforms.Normalize( mean=mean_rgb, std=std_rgb) ]) def cifar10_val_transform(ds_metainfo, mean_rgb=(0.4914, 0.4822, 0.4465), std_rgb=(0.2023, 0.1994, 0.2010)): assert (ds_metainfo is not None) return transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=mean_rgb, std=std_rgb) ])
2,897
30.5
73
py
imgclsmob
imgclsmob-master/pytorch/datasets/librispeech_asr_dataset.py
""" LibriSpeech ASR dataset. """ __all__ = ['LibriSpeech', 'LibriSpeechMetaInfo'] import os import numpy as np from .dataset_metainfo import DatasetMetaInfo from .asr_dataset import AsrDataset, asr_test_transform class LibriSpeech(AsrDataset): """ LibriSpeech dataset for Automatic Speech Recognition (ASR). Parameters: ---------- root : str, default '~/.torch/datasets/LibriSpeech' Path to the folder stored the dataset. mode : str, default 'test' 'train', 'val', 'test', or 'demo'. subset : str, default 'dev-clean' Data subset. transform : function, default None A function that takes data and transforms it. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "LibriSpeech"), mode="test", subset="dev-clean", transform=None): super(LibriSpeech, self).__init__( root=root, mode=mode, transform=transform) self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)} import soundfile root_dir_path = os.path.expanduser(root) assert os.path.exists(root_dir_path) data_dir_path = os.path.join(root_dir_path, subset) assert os.path.exists(data_dir_path) for speaker_id in os.listdir(data_dir_path): speaker_dir_path = os.path.join(data_dir_path, speaker_id) for chapter_id in os.listdir(speaker_dir_path): chapter_dir_path = os.path.join(speaker_dir_path, chapter_id) transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id)) with open(transcript_file_path, "r") as f: transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines()) for flac_file_name in os.listdir(chapter_dir_path): if flac_file_name.endswith(".flac"): wav_file_name = flac_file_name.replace(".flac", ".wav") wav_file_path = os.path.join(chapter_dir_path, wav_file_name) if not os.path.exists(wav_file_path): flac_file_path = os.path.join(chapter_dir_path, flac_file_name) pcm, sample_rate = soundfile.read(flac_file_path) soundfile.write(wav_file_path, pcm, sample_rate) text = transcripts[wav_file_name.replace(".wav", "")] text = text.strip("\n ").lower() text = np.array([vocabulary_dict[c] for c in text], dtype=np.long) self.data.append((wav_file_path, text)) class LibriSpeechMetaInfo(DatasetMetaInfo): def __init__(self): super(LibriSpeechMetaInfo, self).__init__() self.label = "LibriSpeech" self.short_label = "ls" self.root_dir_name = "LibriSpeech" self.dataset_class = LibriSpeech self.dataset_class_extra_kwargs = {"subset": "dev-clean"} self.ml_type = "asr" self.num_classes = 29 self.val_metric_extra_kwargs = [{"vocabulary": None}] self.val_metric_capts = ["Val.WER"] self.val_metric_names = ["WER"] self.test_metric_extra_kwargs = [{"vocabulary": None}] self.test_metric_capts = ["Test.WER"] self.test_metric_names = ["WER"] self.val_transform = asr_test_transform self.test_transform = asr_test_transform self.test_net_extra_kwargs = {"from_audio": True} self.saver_acc_ind = 0 def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for dataset specific metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--subset", type=str, default="dev-clean", help="data subset") def update(self, args): """ Update dataset metainfo after user customizing. 
Parameters: ---------- args : ArgumentParser Main script arguments. """ super(LibriSpeechMetaInfo, self).update(args) self.dataset_class_extra_kwargs["subset"] = args.subset def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. Parameters: ---------- dataset : obj A dataset class instance. """ vocabulary = dataset.vocabulary self.num_classes = len(vocabulary) + 1 self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
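A small sketch of the label encoding performed in `__init__` above: transcripts are lower-cased and mapped to indices in the 28-symbol vocabulary, and decoding is just the inverse lookup (the helper names are illustrative, not repo code).

import numpy as np

vocabulary = [' '] + [chr(c) for c in range(ord('a'), ord('z') + 1)] + ["'"]
vocabulary_dict = {c: i for i, c in enumerate(vocabulary)}

def encode(text):
    # Mirrors LibriSpeech.__init__: strip and lower-case before the lookup.
    return np.array([vocabulary_dict[c] for c in text.strip("\n ").lower()], dtype=np.int64)

def decode(indices):
    return "".join(vocabulary[i] for i in indices)

assert decode(encode("Hello world")) == "hello world"
assert len(vocabulary) == 28  # num_classes is 29: 28 symbols plus one extra class (the CTC blank in Jasper-style models)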
5,294
37.369565
119
py
imgclsmob
imgclsmob-master/pytorch/datasets/cub200_2011_cls_dataset.py
""" CUB-200-2011 classification dataset. """ import os import numpy as np import pandas as pd from PIL import Image import torch.utils.data as data from .imagenet1k_cls_dataset import ImageNet1KMetaInfo class CUB200_2011(data.Dataset): """ CUB-200-2011 fine-grained classification dataset. Parameters: ---------- root : str, default '~/.torch/datasets/CUB_200_2011' Path to the folder stored the dataset. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and transforms it. target_transform : function, default None A function that takes label and transforms it. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "CUB_200_2011"), mode="train", transform=None, target_transform=None): super(CUB200_2011, self).__init__() root_dir_path = os.path.expanduser(root) assert os.path.exists(root_dir_path) images_file_name = "images.txt" images_file_path = os.path.join(root_dir_path, images_file_name) if not os.path.exists(images_file_path): raise Exception("Images file doesn't exist: {}".format(images_file_name)) class_file_name = "image_class_labels.txt" class_file_path = os.path.join(root_dir_path, class_file_name) if not os.path.exists(class_file_path): raise Exception("Image class file doesn't exist: {}".format(class_file_name)) split_file_name = "train_test_split.txt" split_file_path = os.path.join(root_dir_path, split_file_name) if not os.path.exists(split_file_path): raise Exception("Split file doesn't exist: {}".format(split_file_name)) images_df = pd.read_csv( images_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "image_path"], dtype={"image_id": np.int32, "image_path": np.unicode}) class_df = pd.read_csv( class_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "class_id"], dtype={"image_id": np.int32, "class_id": np.uint8}) split_df = pd.read_csv( split_file_path, sep="\s+", header=None, index_col=False, names=["image_id", "split_flag"], dtype={"image_id": np.int32, "split_flag": np.uint8}) df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df") split_flag = 1 if mode == "train" else 0 subset_df = df[df.split_flag == split_flag] self.image_ids = subset_df["image_id"].values.astype(np.int32) self.class_ids = subset_df["class_id"].values.astype(np.int32) - 1 self.image_file_names = subset_df["image_path"].values.astype(np.unicode) images_dir_name = "images" self.images_dir_path = os.path.join(root_dir_path, images_dir_name) assert os.path.exists(self.images_dir_path) self._transform = transform self._target_transform = target_transform def __getitem__(self, index): image_file_name = self.image_file_names[index] image_file_path = os.path.join(self.images_dir_path, image_file_name) img = Image.open(image_file_path).convert("RGB") label = int(self.class_ids[index]) if self._transform is not None: img = self._transform(img) if self._target_transform is not None: label = self._target_transform(label) return img, label def __len__(self): return len(self.image_ids) class CUB200MetaInfo(ImageNet1KMetaInfo): def __init__(self): super(CUB200MetaInfo, self).__init__() self.label = "CUB200_2011" self.short_label = "cub" self.root_dir_name = "CUB_200_2011" self.dataset_class = CUB200_2011 self.num_training_samples = None self.num_classes = 200 self.train_metric_capts = ["Train.Err"] self.train_metric_names = ["Top1Error"] self.train_metric_extra_kwargs = [{"name": "err"}] self.val_metric_capts = ["Val.Err"] self.val_metric_names = ["Top1Error"] 
self.val_metric_extra_kwargs = [{"name": "err"}] self.saver_acc_ind = 0 self.net_extra_kwargs = {"aux": False} self.load_ignore_extra = True def add_dataset_parser_arguments(self, parser, work_dir_path): super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--no-aux", dest="no_aux", action="store_true", help="no `aux` mode in model") def update(self, args): """ Update CUB-200-2011 dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(CUB200MetaInfo, self).update(args) if args.no_aux: self.net_extra_kwargs = None self.load_ignore_extra = False
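A minimal usage sketch (assumes CUB_200_2011 is already unpacked at the default path; `to_one_hot` is an illustrative helper): labels come back as 0-based class ids, so `target_transform` can remap them freely.

import numpy as np
import torchvision.transforms as transforms

def to_one_hot(label, num_classes=200):
    # Map a 0-based class id to a one-hot vector.
    vec = np.zeros(num_classes, dtype=np.float32)
    vec[label] = 1.0
    return vec

dataset = CUB200_2011(
    mode="train",
    transform=transforms.ToTensor(),
    target_transform=to_one_hot)
img, target = dataset[0]
assert target.shape == (200,) and target.sum() == 1.0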
5,320
34.711409
94
py
imgclsmob
imgclsmob-master/pytorch/datasets/mcv_asr_dataset.py
""" Mozilla Common Voice ASR dataset. """ __all__ = ['McvDataset', 'McvMetaInfo'] import os import re import numpy as np import pandas as pd from .dataset_metainfo import DatasetMetaInfo from .asr_dataset import AsrDataset, asr_test_transform class McvDataset(AsrDataset): """ Mozilla Common Voice dataset for Automatic Speech Recognition (ASR). Parameters: ---------- root : str, default '~/.torch/datasets/mcv' Path to the folder stored the dataset. mode : str, default 'test' 'train', 'val', 'test', or 'demo'. lang : str, default 'en' Language. subset : str, default 'dev' Data subset. transform : function, default None A function that takes data and transforms it. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "mcv"), mode="test", lang="en", subset="dev", transform=None): super(McvDataset, self).__init__( root=root, mode=mode, transform=transform) assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34")) self.vocabulary = self.get_vocabulary_for_lang(lang=lang) desired_audio_sample_rate = 16000 vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)} import soundfile import librosa from librosa.core import resample as lr_resample import unicodedata import unidecode root_dir_path = os.path.expanduser(root) assert os.path.exists(root_dir_path) lang_ = lang if lang != "ru34" else "ru" data_dir_path = os.path.join(root_dir_path, lang_) assert os.path.exists(data_dir_path) metainfo_file_path = os.path.join(data_dir_path, subset + ".tsv") assert os.path.exists(metainfo_file_path) metainfo_df = pd.read_csv( metainfo_file_path, sep="\t", header=0, index_col=False) metainfo_df = metainfo_df[["path", "sentence"]] self.data_paths = metainfo_df["path"].values self.data_sentences = metainfo_df["sentence"].values clips_dir_path = os.path.join(data_dir_path, "clips") assert os.path.exists(clips_dir_path) for clip_file_name, sentence in zip(self.data_paths, self.data_sentences): mp3_file_path = os.path.join(clips_dir_path, clip_file_name) assert os.path.exists(mp3_file_path) wav_file_name = clip_file_name.replace(".mp3", ".wav") wav_file_path = os.path.join(clips_dir_path, wav_file_name) # print("==> {}".format(sentence)) text = sentence.lower() if lang == "en": text = re.sub("\.|-|–|—", " ", text) text = re.sub("&", " and ", text) text = re.sub("ō", "o", text) text = re.sub("â|á", "a", text) text = re.sub("é", "e", text) text = re.sub(",|;|:|!|\?|\"|“|”|‘|’|\(|\)", "", text) text = re.sub("\s+", " ", text) text = re.sub(" '", " ", text) text = re.sub("' ", " ", text) elif lang == "fr": text = "".join(c for c in text if unicodedata.combining(c) == 0) text = re.sub("\.|-|–|—|=|×|\*|†|/|ቀ|_|…", " ", text) text = re.sub(",|;|:|!|\?|ʻ|“|”|\"|„|«|»|\(|\)", "", text) text = re.sub("먹|삼|생|고|기|집|\$|ʔ|の|ひ", "", text) text = re.sub("’|´", "'", text) text = re.sub("&", " and ", text) text = re.sub("œ", "oe", text) text = re.sub("æ", "ae", text) text = re.sub("á|ā|ã|ä|ą|ă|å", "a", text) text = re.sub("ö|ō|ó|ð|ổ|ø", "o", text) text = re.sub("ē|ė|ę", "e", text) text = re.sub("í|ī", "i", text) text = re.sub("ú|ū", "u", text) text = re.sub("ý", "y", text) text = re.sub("š|ś|ș|ş", "s", text) text = re.sub("ž|ź|ż", "z", text) text = re.sub("ñ|ń|ṇ", "n", text) text = re.sub("ł|ľ", "l", text) text = re.sub("ć|č", "c", text) text = re.sub("я", "ya", text) text = re.sub("ř", "r", text) text = re.sub("đ", "d", text) text = re.sub("ț", "t", text) text = re.sub("þ", "th", text) text = re.sub("ğ", "g", text) text = re.sub("ß", "ss", text) text = re.sub("µ", "mu", text) text = re.sub("\s+", 
" ", text) elif lang == "de": text = re.sub("\.|-|–|—|/|_|…", " ", text) text = re.sub(",|;|:|!|\?|\"|'|‘|’|ʻ|ʿ|‚|“|”|\"|„|«|»|›|‹|\(|\)", "", text) text = re.sub("°|幺|乡|辶", "", text) text = re.sub("&", " and ", text) text = re.sub("ə", "a", text) text = re.sub("æ", "ae", text) text = re.sub("å|ā|á|ã|ă|â|ą", "a", text) text = re.sub("ó|ð|ø|ọ|ő|ō|ô", "o", text) text = re.sub("é|ë|ê|ě|ę", "e", text) text = re.sub("ū|ứ", "u", text) text = re.sub("í|ï|ı", "i", text) text = re.sub("š|ș|ś|ş", "s", text) text = re.sub("č|ć", "c", text) text = re.sub("đ", "d", text) text = re.sub("ğ", "g", text) text = re.sub("ł", "l", text) text = re.sub("ř", "r", text) text = re.sub("ñ", "n", text) text = re.sub("ț", "t", text) text = re.sub("ž|ź", "z", text) text = re.sub("\s+", " ", text) elif lang == "it": text = re.sub("\.|-|–|—|/|_|…", " ", text) text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text) text = re.sub("\$|#|禅", "", text) text = re.sub("’|`", "'", text) text = re.sub("ə", "a", text) text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text) text = re.sub("\s+", " ", text) elif lang == "es": text = re.sub("\.|-|–|—|/|=|_|{|…", " ", text) text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text) text = re.sub("蝦|夷", "", text) text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text) text = re.sub("\s+", " ", text) elif lang == "ca": text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text) text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text) text = re.sub("ঃ|ং", "", text) text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text) text = re.sub("\s+", " ", text) elif lang == "pl": text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text) text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text) text = re.sub("q", "k", text) text = re.sub("x", "ks", text) text = re.sub("v", "w", text) text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text) text = re.sub("\s+", " ", text) elif lang in ("ru", "ru34"): text = re.sub("по-", "по", text) text = re.sub("во-", "во", text) text = re.sub("-то", "то", text) text = re.sub("\.|−|-|–|—|…", " ", text) text = re.sub(",|;|:|!|\?|‘|’|\"|“|”|«|»|'", "", text) text = re.sub("m", "м", text) text = re.sub("o", "о", text) text = re.sub("z", "з", text) text = re.sub("i", "и", text) text = re.sub("l", "л", text) text = re.sub("a", "а", text) text = re.sub("f", "ф", text) text = re.sub("r", "р", text) text = re.sub("e", "е", text) text = re.sub("x", "кс", text) text = re.sub("h", "х", text) text = re.sub("\s+", " ", text) if lang == "ru34": text = re.sub("ё", "е", text) text = re.sub(" $", "", text) # print("<== {}".format(text)) text = np.array([vocabulary_dict[c] for c in text], dtype=np.long) self.data.append((wav_file_path, text)) # continue if os.path.exists(wav_file_path): continue # pass x, sr = librosa.load(path=mp3_file_path, sr=None) if desired_audio_sample_rate != sr: y = lr_resample(y=x, orig_sr=sr, target_sr=desired_audio_sample_rate) soundfile.write(file=wav_file_path, data=y, samplerate=desired_audio_sample_rate) @staticmethod def get_vocabulary_for_lang(lang="en"): """ Get the vocabulary for a language. Parameters: ---------- lang : str, default 'en' Language. Returns: ------- list of str Vocabulary set. 
""" assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34")) if lang == "en": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] elif lang == "fr": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï', 'ü', 'ÿ'] elif lang == "de": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß'] elif lang == "it": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù'] elif lang == "es": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü'] elif lang == "ca": return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ'] elif lang == "pl": return [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń', 'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż'] elif lang == "ru": return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] elif lang == "ru34": return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] else: return None class McvMetaInfo(DatasetMetaInfo): def __init__(self): super(McvMetaInfo, self).__init__() self.label = "MCV" self.short_label = "mcv" self.root_dir_name = "cv-corpus-6.1-2020-12-11" self.dataset_class = McvDataset self.lang = "en" self.dataset_class_extra_kwargs = { "lang": self.lang, "subset": "dev"} self.ml_type = "asr" self.num_classes = None self.val_metric_extra_kwargs = [{"vocabulary": None}] self.val_metric_capts = ["Val.WER"] self.val_metric_names = ["WER"] self.test_metric_extra_kwargs = [{"vocabulary": None}] self.test_metric_capts = ["Test.WER"] self.test_metric_names = ["WER"] self.val_transform = asr_test_transform self.test_transform = asr_test_transform self.saver_acc_ind = 0 def add_dataset_parser_arguments(self, parser, work_dir_path): """ Create python script parameters (for dataset specific metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory. """ super(McvMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--lang", type=str, default="en", help="language") parser.add_argument( "--subset", type=str, default="dev", help="data subset") def update(self, args): """ Update dataset metainfo after user customizing. Parameters: ---------- args : ArgumentParser Main script arguments. """ super(McvMetaInfo, self).update(args) self.lang = args.lang self.dataset_class_extra_kwargs["lang"] = args.lang self.dataset_class_extra_kwargs["subset"] = args.subset def update_from_dataset(self, dataset): """ Update dataset metainfo after a dataset class instance creation. 
Parameters: ---------- dataset : obj A dataset class instance. """ vocabulary = dataset.vocabulary self.num_classes = len(vocabulary) + 1 self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
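A quick sketch of how the per-language vocabularies above feed label encoding (it mirrors the loop in `__init__`, with a hand-written sentence standing in for a real MCV clip; assumes the repo package is importable):

import numpy as np

vocab = McvDataset.get_vocabulary_for_lang(lang="de")
vocab_dict = {c: i for i, c in enumerate(vocab)}
text = "straße und fluß"  # already normalized: lower-case, vocabulary symbols only
ids = np.array([vocab_dict[c] for c in text], dtype=np.int64)
assert "".join(vocab[i] for i in ids) == text  # encoding and decoding round-trip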
14,287
41.906907
119
py
imgclsmob
imgclsmob-master/pytorch/datasets/voc_seg_dataset.py
""" Pascal VOC2012 semantic segmentation dataset. """ import os import numpy as np from PIL import Image import torchvision.transforms as transforms from .seg_dataset import SegDataset from .dataset_metainfo import DatasetMetaInfo class VOCSegDataset(SegDataset): """ Pascal VOC2012 semantic segmentation dataset. Parameters: ---------- root : str Path to VOCdevkit folder. mode : str, default 'train' 'train', 'val', 'test', or 'demo'. transform : callable, optional A function that transforms the image. """ def __init__(self, root, mode="train", transform=None, **kwargs): super(VOCSegDataset, self).__init__( root=root, mode=mode, transform=transform, **kwargs) base_dir_path = os.path.join(root, "VOC2012") image_dir_path = os.path.join(base_dir_path, "JPEGImages") mask_dir_path = os.path.join(base_dir_path, "SegmentationClass") splits_dir_path = os.path.join(base_dir_path, "ImageSets", "Segmentation") if mode == "train": split_file_path = os.path.join(splits_dir_path, "train.txt") elif mode in ("val", "test", "demo"): split_file_path = os.path.join(splits_dir_path, "val.txt") else: raise RuntimeError("Unknown dataset splitting mode") self.images = [] self.masks = [] with open(os.path.join(split_file_path), "r") as lines: for line in lines: image_file_path = os.path.join(image_dir_path, line.rstrip('\n') + ".jpg") assert os.path.isfile(image_file_path) self.images.append(image_file_path) mask_file_path = os.path.join(mask_dir_path, line.rstrip('\n') + ".png") assert os.path.isfile(mask_file_path) self.masks.append(mask_file_path) assert (len(self.images) == len(self.masks)) def __getitem__(self, index): image = Image.open(self.images[index]).convert("RGB") if self.mode == "demo": image = self._img_transform(image) if self.transform is not None: image = self.transform(image) return image, os.path.basename(self.images[index]) mask = Image.open(self.masks[index]) if self.mode == "train": image, mask = self._sync_transform(image, mask) elif self.mode == "val": image, mask = self._val_sync_transform(image, mask) else: assert self.mode == "test" image, mask = self._img_transform(image), self._mask_transform(mask) if self.transform is not None: image = self.transform(image) return image, mask classes = 21 vague_idx = 255 use_vague = True background_idx = 0 ignore_bg = True @staticmethod def _mask_transform(mask): np_mask = np.array(mask).astype(np.int32) # np_mask[np_mask == 255] = VOCSegDataset.vague_idx return np_mask def __len__(self): return len(self.images) def voc_test_transform(ds_metainfo, mean_rgb=(0.485, 0.456, 0.406), std_rgb=(0.229, 0.224, 0.225)): assert (ds_metainfo is not None) return transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=mean_rgb, std=std_rgb) ]) class VOCMetaInfo(DatasetMetaInfo): def __init__(self): super(VOCMetaInfo, self).__init__() self.label = "VOC" self.short_label = "voc" self.root_dir_name = "voc" self.dataset_class = VOCSegDataset self.num_training_samples = None self.in_channels = 3 self.num_classes = VOCSegDataset.classes self.input_image_size = (480, 480) self.train_metric_capts = None self.train_metric_names = None self.train_metric_extra_kwargs = None self.val_metric_capts = None self.val_metric_names = None self.test_metric_extra_kwargs = [{}, {}] self.test_metric_capts = ["Val.PixAcc", "Val.IoU"] self.test_metric_names = ["PixelAccuracyMetric", "MeanIoUMetric"] self.test_metric_extra_kwargs = [ {"vague_idx": VOCSegDataset.vague_idx, "use_vague": VOCSegDataset.use_vague, "macro_average": False}, {"num_classes": VOCSegDataset.classes, 
"vague_idx": VOCSegDataset.vague_idx, "use_vague": VOCSegDataset.use_vague, "bg_idx": VOCSegDataset.background_idx, "ignore_bg": VOCSegDataset.ignore_bg, "macro_average": False}] self.saver_acc_ind = 1 self.train_transform = None self.val_transform = voc_test_transform self.test_transform = voc_test_transform self.ml_type = "imgseg" self.allow_hybridize = False self.net_extra_kwargs = {"aux": False, "fixed_size": False} self.load_ignore_extra = True self.image_base_size = 520 self.image_crop_size = 480 def add_dataset_parser_arguments(self, parser, work_dir_path): super(VOCMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--image-base-size", type=int, default=520, help="base image size") parser.add_argument( "--image-crop-size", type=int, default=480, help="crop image size") def update(self, args): super(VOCMetaInfo, self).update(args) self.image_base_size = args.image_base_size self.image_crop_size = args.image_crop_size
5,894
33.273256
90
py
imgclsmob
imgclsmob-master/pytorch/datasets/cifar100_cls_dataset.py
""" CIFAR-100 classification dataset. """ import os from torchvision.datasets import CIFAR100 from .cifar10_cls_dataset import CIFAR10MetaInfo class CIFAR100Fine(CIFAR100): """ CIFAR-100 image classification dataset. Parameters: ---------- root : str, default '~/.torch/datasets/cifar100' Path to temp folder for storing data. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and label and transforms them. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "cifar100"), mode="train", transform=None): super(CIFAR100Fine, self).__init__( root=root, train=(mode == "train"), transform=transform, download=True) class CIFAR100MetaInfo(CIFAR10MetaInfo): def __init__(self): super(CIFAR100MetaInfo, self).__init__() self.label = "CIFAR100" self.root_dir_name = "cifar100" self.dataset_class = CIFAR100Fine self.num_classes = 100
1,132
25.97619
74
py
imgclsmob
imgclsmob-master/pytorch/datasets/hpatches_mch_dataset.py
""" HPatches image matching dataset. """ import os import cv2 import numpy as np import torch.utils.data as data import torchvision.transforms as transforms from .dataset_metainfo import DatasetMetaInfo class HPatches(data.Dataset): """ HPatches (full image sequences) image matching dataset. Info URL: https://github.com/hpatches/hpatches-dataset Data URL: http://icvl.ee.ic.ac.uk/vbalnt/hpatches/hpatches-sequences-release.tar.gz Parameters: ---------- root : str, default '~/.torch/datasets/hpatches' Path to the folder stored the dataset. mode : str, default 'train' 'train', 'val', or 'test'. alteration : str, default 'all' 'all', 'i' for illumination or 'v' for viewpoint. transform : function, default None A function that takes data and label and transforms them. """ def __init__(self, root=os.path.join("~", ".torch", "datasets", "hpatches"), mode="train", alteration="all", transform=None): super(HPatches, self).__init__() assert os.path.exists(root) num_images = 5 image_file_ext = ".ppm" self.mode = mode self.image_paths = [] self.warped_image_paths = [] self.homographies = [] subdir_names = [name for name in os.listdir(root) if os.path.isdir(os.path.join(root, name))] # subdir_names.sort() if alteration != "all": subdir_names = [name for name in subdir_names if name[0] == alteration] for subdir_name in subdir_names: subdir_path = os.path.join(root, subdir_name) for i in range(num_images): k = i + 2 self.image_paths.append(os.path.join(subdir_path, "1" + image_file_ext)) self.warped_image_paths.append(os.path.join(subdir_path, str(k) + image_file_ext)) self.homographies.append(np.loadtxt(os.path.join(subdir_path, "H_1_" + str(k)))) self.transform = transform def __getitem__(self, index): # print("Image file name: {}, index: {}".format(self.image_paths[index], index)) image = cv2.imread(self.image_paths[index], flags=0) # if image.shape[0] > 1500: # image = cv2.resize( # src=image, # dsize=None, # fx=0.5, # fy=0.5, # interpolation=cv2.INTER_AREA) # print("Image shape: {}".format(image.shape)) warped_image = cv2.imread(self.warped_image_paths[index], flags=0) # if warped_image.shape[0] > 1500: # warped_image = cv2.resize( # src=warped_image, # dsize=None, # fx=0.5, # fy=0.5, # interpolation=cv2.INTER_AREA) # print("W-Image shape: {}".format(warped_image.shape)) homography = self.homographies[index].astype(np.float32) if self.transform is not None: image = self.transform(image) warped_image = self.transform(warped_image) return image, warped_image, homography def __len__(self): return len(self.image_paths) class HPatchesMetaInfo(DatasetMetaInfo): def __init__(self): super(HPatchesMetaInfo, self).__init__() self.label = "hpatches" self.short_label = "hpatches" self.root_dir_name = "hpatches" self.dataset_class = HPatches self.ml_type = "imgmch" self.do_transform = True self.val_transform = hpatches_val_transform self.test_transform = hpatches_val_transform self.allow_hybridize = False self.net_extra_kwargs = {} def add_dataset_parser_arguments(self, parser, work_dir_path): super(HPatchesMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--alteration", type=str, default="all", help="dataset alternation. options are all, i, or v") def update(self, args): super(HPatchesMetaInfo, self).update(args) self.dataset_class_extra_kwargs = {"alteration": args.alteration} def hpatches_val_transform(ds_metainfo): assert (ds_metainfo is not None) return transforms.Compose([ transforms.ToTensor() ])
4,450
33.773438
101
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/airnext.py
""" AirNeXt for ImageNet-1K, implemented in PyTorch. Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. """ __all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16'] import os import math import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .airnet import AirBlock, AirInitBlock class AirNeXtBottleneck(nn.Module): """ AirNet bottleneck block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. ratio: int Air compression ratio. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, ratio): super(AirNeXtBottleneck, self).__init__() mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D self.use_air_block = (stride == 1 and mid_channels < 512) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=group_width) self.conv2 = conv3x3_block( in_channels=group_width, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) if self.use_air_block: self.air = AirBlock( in_channels=in_channels, out_channels=group_width, groups=(cardinality // ratio), ratio=ratio) def forward(self, x): if self.use_air_block: att = self.air(x) x = self.conv1(x) x = self.conv2(x) if self.use_air_block: x = x * att x = self.conv3(x) return x class AirNeXtUnit(nn.Module): """ AirNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. ratio: int Air compression ratio. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, ratio): super(AirNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = AirNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, ratio=ratio) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class AirNeXt(nn.Module): """ AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. ratio: int Air compression ratio. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, ratio, in_channels=3, in_size=(224, 224), num_classes=1000): super(AirNeXt, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", AirInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), AirNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, ratio=ratio)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_airnext(blocks, cardinality, bottleneck_width, base_channels, ratio, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create AirNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. base_channels: int Base number of channels. ratio: int Air compression ratio. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks)) bottleneck_expansion = 4 init_block_channels = base_channels channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = AirNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, ratio=ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def airnext50_32x4d_r2(**kwargs): """ AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_airnext( blocks=50, cardinality=32, bottleneck_width=4, base_channels=64, ratio=2, model_name="airnext50_32x4d_r2", **kwargs) def airnext101_32x4d_r2(**kwargs): """ AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_airnext( blocks=101, cardinality=32, bottleneck_width=4, base_channels=64, ratio=2, model_name="airnext101_32x4d_r2", **kwargs) def airnext101_32x4d_r16(**kwargs): """ AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_airnext( blocks=101, cardinality=32, bottleneck_width=4, base_channels=64, ratio=16, model_name="airnext101_32x4d_r16", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ airnext50_32x4d_r2, airnext101_32x4d_r2, airnext101_32x4d_r16, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != airnext50_32x4d_r2 or weight_count == 27604296) assert (model != airnext101_32x4d_r2 or weight_count == 54099272) assert (model != airnext101_32x4d_r16 or weight_count == 45456456) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
11,535
29.041667
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/pspnet.py
""" PSPNet for image segmentation, implemented in PyTorch. Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. """ __all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco', 'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k', 'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling'] import os import torch.nn as nn import torch.nn.functional as F from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, Identity from .resnetd import resnetd50b, resnetd101b class PSPFinalBlock(nn.Module): """ PSPNet final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, bottleneck_factor=4): super(PSPFinalBlock, self).__init__() assert (in_channels % bottleneck_factor == 0) mid_channels = in_channels // bottleneck_factor self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x, out_size): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) x = F.interpolate(x, size=out_size, mode="bilinear", align_corners=True) return x class PyramidPoolingBranch(nn.Module): """ Pyramid Pooling branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. pool_out_size : int Target output size of the image. upscale_out_size : tuple of 2 int Spatial size of output image for the bilinear upsampling operation. """ def __init__(self, in_channels, out_channels, pool_out_size, upscale_out_size): super(PyramidPoolingBranch, self).__init__() self.upscale_out_size = upscale_out_size self.pool = nn.AdaptiveAvgPool2d(pool_out_size) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) def forward(self, x): in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = F.interpolate(x, size=in_size, mode="bilinear", align_corners=True) return x class PyramidPooling(nn.Module): """ Pyramid Pooling module. Parameters: ---------- in_channels : int Number of input channels. upscale_out_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. """ def __init__(self, in_channels, upscale_out_size): super(PyramidPooling, self).__init__() pool_out_sizes = [1, 2, 3, 6] assert (len(pool_out_sizes) == 4) assert (in_channels % 4 == 0) mid_channels = in_channels // 4 self.branches = Concurrent() self.branches.add_module("branch1", Identity()) for i, pool_out_size in enumerate(pool_out_sizes): self.branches.add_module("branch{}".format(i + 2), PyramidPoolingBranch( in_channels=in_channels, out_channels=mid_channels, pool_out_size=pool_out_size, upscale_out_size=upscale_out_size)) def forward(self, x): x = self.branches(x) return x class PSPNet(nn.Module): """ PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. num_classes : int, default 21 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21): super(PSPNet, self).__init__() assert (in_channels > 0) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.backbone = backbone pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None self.pool = PyramidPooling( in_channels=backbone_out_channels, upscale_out_size=pool_out_size) pool_out_channels = 2 * backbone_out_channels self.final_block = PSPFinalBlock( in_channels=pool_out_channels, out_channels=num_classes, bottleneck_factor=8) if self.aux: aux_out_channels = backbone_out_channels // 2 self.aux_block = PSPFinalBlock( in_channels=aux_out_channels, out_channels=num_classes, bottleneck_factor=4) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, y = self.backbone(x) x = self.pool(x) x = self.final_block(x, in_size) if self.aux: y = self.aux_block(y, in_size) return x, y else: return x def get_pspnet(backbone, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PSPNet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. num_classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = PSPNet( backbone=backbone, num_classes=num_classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def pspnet_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_voc", **kwargs) def pspnet_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_voc", **kwargs) def pspnet_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_coco", **kwargs) def pspnet_resnetd101b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_coco", **kwargs) def pspnet_resnetd50b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_ade20k", **kwargs) def pspnet_resnetd101b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_ade20k", **kwargs) def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_cityscapes", **kwargs) def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (480, 480) aux = False pretrained = False models = [ (pspnet_resnetd50b_voc, 21), (pspnet_resnetd101b_voc, 21), (pspnet_resnetd50b_coco, 21), (pspnet_resnetd101b_coco, 21), (pspnet_resnetd50b_ade20k, 150), (pspnet_resnetd101b_ade20k, 150), (pspnet_resnetd50b_cityscapes, 19), (pspnet_resnetd101b_cityscapes, 19), ] for model, num_classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != pspnet_resnetd50b_voc or weight_count == 49081578) assert (model != pspnet_resnetd101b_voc or weight_count == 68073706) assert (model != pspnet_resnetd50b_coco or weight_count == 49081578) assert (model != pspnet_resnetd101b_coco or weight_count == 68073706) assert (model != pspnet_resnetd50b_ade20k or weight_count == 49180908) assert (model != pspnet_resnetd101b_ade20k or weight_count == 68173036) assert (model != pspnet_resnetd50b_cityscapes or weight_count == 49080038) assert (model != pspnet_resnetd101b_cityscapes or weight_count == 68072166) else: assert (model != pspnet_resnetd50b_voc or weight_count == 46716373) assert (model != pspnet_resnetd101b_voc or weight_count == 65708501) assert (model != pspnet_resnetd50b_coco or weight_count == 46716373) assert (model != pspnet_resnetd101b_coco or weight_count == 65708501) assert (model != pspnet_resnetd50b_ade20k or weight_count == 46782550) assert (model != pspnet_resnetd101b_ade20k or weight_count == 65774678) assert (model != pspnet_resnetd50b_cityscapes or weight_count == 46715347) assert (model != pspnet_resnetd101b_cityscapes or weight_count == 65707475) x = torch.randn(1, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3))) if __name__ == "__main__": _test()
18,380
35.909639
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/dla.py
""" DLA for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. """ __all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block from .resnet import ResBlock, ResBottleneck from .resnext import ResNeXtBottleneck class DLABottleneck(ResBottleneck): """ DLA bottleneck block for residual path in residual block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 2 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, bottleneck_factor=2): super(DLABottleneck, self).__init__( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck_factor=bottleneck_factor) class DLABottleneckX(ResNeXtBottleneck): """ DLA ResNeXt-like bottleneck block for residual path in residual block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int, default 32 Number of groups. bottleneck_width: int, default 8 Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality=32, bottleneck_width=8): super(DLABottleneckX, self).__init__( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) class DLAResBlock(nn.Module): """ DLA residual block with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. body_class : nn.Module, default ResBlock Residual block body class. return_down : bool, default False Whether return downsample result. """ def __init__(self, in_channels, out_channels, stride, body_class=ResBlock, return_down=False): super(DLAResBlock, self).__init__() self.return_down = return_down self.downsample = (stride > 1) self.project = (in_channels != out_channels) self.body = body_class( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = nn.ReLU(inplace=True) if self.downsample: self.downsample_pool = nn.MaxPool2d( kernel_size=stride, stride=stride) if self.project: self.project_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) def forward(self, x): down = self.downsample_pool(x) if self.downsample else x identity = self.project_conv(down) if self.project else down if identity is None: identity = x x = self.body(x) x += identity x = self.activ(x) if self.return_down: return x, down else: return x class DLARoot(nn.Module): """ DLA root block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. residual : bool Whether use residual connection. 
""" def __init__(self, in_channels, out_channels, residual): super(DLARoot, self).__init__() self.residual = residual self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x2, x1, extra): last_branch = x2 x = torch.cat((x2, x1) + tuple(extra), dim=1) x = self.conv(x) if self.residual: x += last_branch x = self.activ(x) return x class DLATree(nn.Module): """ DLA tree unit. It's like iterative stage. Parameters: ---------- levels : int Number of levels in the stage. in_channels : int Number of input channels. out_channels : int Number of output channels. res_body_class : nn.Module Residual block body class. stride : int or tuple/list of 2 int Strides of the convolution in a residual block. root_residual : bool Whether use residual connection in the root. root_dim : int Number of input channels in the root block. first_tree : bool, default False Is this tree stage the first stage in the net. input_level : bool, default True Is this tree unit the first unit in the stage. return_down : bool, default False Whether return downsample result. """ def __init__(self, levels, in_channels, out_channels, res_body_class, stride, root_residual, root_dim=0, first_tree=False, input_level=True, return_down=False): super(DLATree, self).__init__() self.return_down = return_down self.add_down = (input_level and not first_tree) self.root_level = (levels == 1) if root_dim == 0: root_dim = 2 * out_channels if self.add_down: root_dim += in_channels if self.root_level: self.tree1 = DLAResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, body_class=res_body_class, return_down=True) self.tree2 = DLAResBlock( in_channels=out_channels, out_channels=out_channels, stride=1, body_class=res_body_class, return_down=False) else: self.tree1 = DLATree( levels=levels - 1, in_channels=in_channels, out_channels=out_channels, res_body_class=res_body_class, stride=stride, root_residual=root_residual, root_dim=0, input_level=False, return_down=True) self.tree2 = DLATree( levels=levels - 1, in_channels=out_channels, out_channels=out_channels, res_body_class=res_body_class, stride=1, root_residual=root_residual, root_dim=root_dim + out_channels, input_level=False, return_down=False) if self.root_level: self.root = DLARoot( in_channels=root_dim, out_channels=out_channels, residual=root_residual) def forward(self, x, extra=None): extra = [] if extra is None else extra x1, down = self.tree1(x) if self.add_down: extra.append(down) if self.root_level: x2 = self.tree2(x1) x = self.root(x2, x1, extra) else: extra.append(x1) x = self.tree2(x1, extra) if self.return_down: return x, down else: return x class DLAInitBlock(nn.Module): """ DLA specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DLAInitBlock, self).__init__() mid_channels = out_channels // 2 self.conv1 = conv7x7_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class DLA(nn.Module): """ DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- levels : int Number of levels in each stage. 
channels : list of int Number of output channels for each stage. init_block_channels : int Number of output channels for the initial unit. res_body_class : nn.Module Residual block body class. residual_root : bool Whether use residual connection in the root blocks. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, levels, channels, init_block_channels, res_body_class, residual_root, in_channels=3, in_size=(224, 224), num_classes=1000): super(DLA, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", DLAInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i in range(len(levels)): levels_i = levels[i] out_channels = channels[i] first_tree = (i == 0) self.features.add_module("stage{}".format(i + 1), DLATree( levels=levels_i, in_channels=in_channels, out_channels=out_channels, res_body_class=res_body_class, stride=2, root_residual=residual_root, first_tree=first_tree)) in_channels = out_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_dla(levels, channels, res_body_class, residual_root=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DLA model with specific parameters. Parameters: ---------- levels : int Number of levels in each stage. channels : list of int Number of output channels for each stage. res_body_class : nn.Module Residual block body class. residual_root : bool, default False Whether use residual connection in the root blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 32 net = DLA( levels=levels, channels=channels, init_block_channels=init_block_channels, res_body_class=res_body_class, residual_root=residual_root, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def dla34(**kwargs): """ DLA-34 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 2, 1], channels=[64, 128, 256, 512], res_body_class=ResBlock, model_name="dla34", **kwargs) def dla46c(**kwargs): """ DLA-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneck, model_name="dla46c", **kwargs) def dla46xc(**kwargs): """ DLA-X-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX, model_name="dla46xc", **kwargs) def dla60(**kwargs): """ DLA-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck, model_name="dla60", **kwargs) def dla60x(**kwargs): """ DLA-X-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX, model_name="dla60x", **kwargs) def dla60xc(**kwargs): """ DLA-X-60-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 2, 3, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX, model_name="dla60xc", **kwargs) def dla102(**kwargs): """ DLA-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck, residual_root=True, model_name="dla102", **kwargs) def dla102x(**kwargs): """ DLA-X-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX, residual_root=True, model_name="dla102x", **kwargs) def dla102x2(**kwargs): """ DLA-X2-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" class DLABottleneckX64(DLABottleneckX): def __init__(self, in_channels, out_channels, stride): super(DLABottleneckX64, self).__init__(in_channels, out_channels, stride, cardinality=64) return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX64, residual_root=True, model_name="dla102x2", **kwargs) def dla169(**kwargs): """ DLA-169 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_dla(levels=[2, 3, 5, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck, residual_root=True, model_name="dla169", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ dla34, dla46c, dla46xc, dla60, dla60x, dla60xc, dla102, dla102x, dla102x2, dla169, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dla34 or weight_count == 15742104) assert (model != dla46c or weight_count == 1301400) assert (model != dla46xc or weight_count == 1068440) assert (model != dla60 or weight_count == 22036632) assert (model != dla60x or weight_count == 17352344) assert (model != dla60xc or weight_count == 1319832) assert (model != dla102 or weight_count == 33268888) assert (model != dla102x or weight_count == 26309272) assert (model != dla102x2 or weight_count == 41282200) assert (model != dla169 or weight_count == 53389720) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
19,884
29.734158
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/proxylessnas.py
""" ProxylessNAS for ImageNet-1K, implemented in PyTorch. Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. """ __all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14', 'ProxylessUnit', 'get_proxylessnas'] import os import torch.nn as nn import torch.nn.init as init from .common import ConvBlock, conv1x1_block, conv3x3_block class ProxylessBlock(nn.Module): """ ProxylessNAS block for residual path in ProxylessNAS unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. stride : int Strides of the convolution. bn_eps : float Small float added to variance in Batch norm. expansion : int Expansion ratio. """ def __init__(self, in_channels, out_channels, kernel_size, stride, bn_eps, expansion): super(ProxylessBlock, self).__init__() self.use_bc = (expansion > 1) mid_channels = in_channels * expansion if self.use_bc: self.bc_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation="relu6") padding = (kernel_size - 1) // 2 self.dw_conv = ConvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=mid_channels, bn_eps=bn_eps, activation="relu6") self.pw_conv = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def forward(self, x): if self.use_bc: x = self.bc_conv(x) x = self.dw_conv(x) x = self.pw_conv(x) return x class ProxylessUnit(nn.Module): """ ProxylessNAS unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size for body block. stride : int Strides of the convolution. bn_eps : float Small float added to variance in Batch norm. expansion : int Expansion ratio for body block. residual : bool Whether to use residual branch. shortcut : bool Whether to use identity branch. """ def __init__(self, in_channels, out_channels, kernel_size, stride, bn_eps, expansion, residual, shortcut): super(ProxylessUnit, self).__init__() assert (residual or shortcut) self.residual = residual self.shortcut = shortcut if self.residual: self.body = ProxylessBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, bn_eps=bn_eps, expansion=expansion) def forward(self, x): if not self.residual: return x if not self.shortcut: return self.body(x) identity = x x = self.body(x) x = identity + x return x class ProxylessNAS(nn.Module): """ ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. residuals : list of list of int Whether to use residual branch in units. shortcuts : list of list of int Whether to use identity branch in units. kernel_sizes : list of list of int Convolution window size for each units. expansions : list of list of int Expansion ratio for each units. bn_eps : float, default 1e-3 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. 
in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, residuals, shortcuts, kernel_sizes, expansions, bn_eps=1e-3, in_channels=3, in_size=(224, 224), num_classes=1000): super(ProxylessNAS, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, bn_eps=bn_eps, activation="relu6")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() residuals_per_stage = residuals[i] shortcuts_per_stage = shortcuts[i] kernel_sizes_per_stage = kernel_sizes[i] expansions_per_stage = expansions[i] for j, out_channels in enumerate(channels_per_stage): residual = (residuals_per_stage[j] == 1) shortcut = (shortcuts_per_stage[j] == 1) kernel_size = kernel_sizes_per_stage[j] expansion = expansions_per_stage[j] stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ProxylessUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, bn_eps=bn_eps, expansion=expansion, residual=residual, shortcut=shortcut)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, activation="relu6")) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_proxylessnas(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ProxylessNAS model with specific parameters. Parameters: ---------- version : str Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if version == "cpu": residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]] channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104], [216, 216, 216, 216, 360]] kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]] expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]] init_block_channels = 40 final_block_channels = 1432 elif version == "gpu": residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]] channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128], [256, 256, 256, 256, 432]] kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]] expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]] init_block_channels = 40 final_block_channels = 1728 elif version == "mobile": residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]] channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96], [192, 192, 192, 192, 320]] kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]] expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]] init_block_channels = 32 final_block_channels = 1280 elif version == "mobile14": residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]] channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136], [256, 256, 256, 256, 448]] kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]] expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]] init_block_channels = 48 final_block_channels = 1792 else: raise ValueError("Unsupported ProxylessNAS version: {}".format(version)) shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]] net = ProxylessNAS( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, residuals=residuals, shortcuts=shortcuts, kernel_sizes=kernel_sizes, expansions=expansions, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def proxylessnas_cpu(**kwargs): """ ProxylessNAS (CPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(version="cpu", model_name="proxylessnas_cpu", **kwargs) def proxylessnas_gpu(**kwargs): """ ProxylessNAS (GPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_proxylessnas(version="gpu", model_name="proxylessnas_gpu", **kwargs) def proxylessnas_mobile(**kwargs): """ ProxylessNAS (Mobile) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(version="mobile", model_name="proxylessnas_mobile", **kwargs) def proxylessnas_mobile14(**kwargs): """ ProxylessNAS (Mobile-14) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_proxylessnas(version="mobile14", model_name="proxylessnas_mobile14", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ proxylessnas_cpu, proxylessnas_gpu, proxylessnas_mobile, proxylessnas_mobile14, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != proxylessnas_cpu or weight_count == 4361648) assert (model != proxylessnas_gpu or weight_count == 7119848) assert (model != proxylessnas_mobile or weight_count == 4080512) assert (model != proxylessnas_mobile14 or weight_count == 6857568) x = torch.randn(14, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, 1000)) if __name__ == "__main__": _test()
14,555
33.492891
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/isqrtcovresnet.py
""" iSQRT-COV-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. """ __all__ = ['iSQRTCOVResNet', 'isqrtcovresnet18', 'isqrtcovresnet34', 'isqrtcovresnet50', 'isqrtcovresnet50b', 'isqrtcovresnet101', 'isqrtcovresnet101b'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block from .resnet import ResUnit, ResInitBlock class CovPool(torch.autograd.Function): """ Covariance pooling function. """ @staticmethod def forward(ctx, x): batch, channels, height, width = x.size() n = height * width xn = x.reshape(batch, channels, n) identity_bar = ((1.0 / n) * torch.eye(n, dtype=xn.dtype, device=xn.device)).unsqueeze(dim=0).repeat(batch, 1, 1) ones_bar = torch.full((batch, n, n), fill_value=(-1.0 / n / n), dtype=xn.dtype, device=xn.device) i_bar = identity_bar + ones_bar sigma = xn.bmm(i_bar).bmm(xn.transpose(1, 2)) ctx.save_for_backward(x, i_bar) return sigma @staticmethod def backward(ctx, grad_sigma): x, i_bar = ctx.saved_tensors batch, channels, height, width = x.size() n = height * width xn = x.reshape(batch, channels, n) grad_x = grad_sigma + grad_sigma.transpose(1, 2) grad_x = grad_x.bmm(xn).bmm(i_bar) grad_x = grad_x.reshape(batch, channels, height, width) return grad_x class NewtonSchulzSqrt(torch.autograd.Function): """ Newton-Schulz iterative matrix square root function. Parameters: ---------- x : Tensor Input tensor (batch * cols * rows). n : int Number of iterations (n > 1). """ @staticmethod def forward(ctx, x, n): assert (n > 1) batch, cols, rows = x.size() assert (cols == rows) m = cols identity = torch.eye(m, dtype=x.dtype, device=x.device).unsqueeze(dim=0).repeat(batch, 1, 1) x_trace = (x * identity).sum(dim=(1, 2), keepdim=True) a = x / x_trace i3 = 3.0 * identity yi = torch.zeros(batch, n - 1, m, m, dtype=x.dtype, device=x.device) zi = torch.zeros(batch, n - 1, m, m, dtype=x.dtype, device=x.device) b2 = 0.5 * (i3 - a) yi[:, 0, :, :] = a.bmm(b2) zi[:, 0, :, :] = b2 for i in range(1, n - 1): b2 = 0.5 * (i3 - zi[:, i - 1, :, :].bmm(yi[:, i - 1, :, :])) yi[:, i, :, :] = yi[:, i - 1, :, :].bmm(b2) zi[:, i, :, :] = b2.bmm(zi[:, i - 1, :, :]) b2 = 0.5 * (i3 - zi[:, n - 2, :, :].bmm(yi[:, n - 2, :, :])) yn = yi[:, n - 2, :, :].bmm(b2) x_trace_sqrt = torch.sqrt(x_trace) c = yn * x_trace_sqrt ctx.save_for_backward(x, x_trace, a, yi, zi, yn, x_trace_sqrt) ctx.n = n return c @staticmethod def backward(ctx, grad_c): x, x_trace, a, yi, zi, yn, x_trace_sqrt = ctx.saved_tensors n = ctx.n batch, m, _ = x.size() identity0 = torch.eye(m, dtype=x.dtype, device=x.device) identity = identity0.unsqueeze(dim=0).repeat(batch, 1, 1) i3 = 3.0 * identity grad_yn = grad_c * x_trace_sqrt b = i3 - yi[:, n - 2, :, :].bmm(zi[:, n - 2, :, :]) grad_yi = 0.5 * (grad_yn.bmm(b) - zi[:, n - 2, :, :].bmm(yi[:, n - 2, :, :]).bmm(grad_yn)) grad_zi = -0.5 * yi[:, n - 2, :, :].bmm(grad_yn).bmm(yi[:, n - 2, :, :]) for i in range(n - 3, -1, -1): b = i3 - yi[:, i, :, :].bmm(zi[:, i, :, :]) ziyi = zi[:, i, :, :].bmm(yi[:, i, :, :]) grad_yi_m1 = 0.5 * (grad_yi.bmm(b) - zi[:, i, :, :].bmm(grad_zi).bmm(zi[:, i, :, :]) - ziyi.bmm(grad_yi)) grad_zi_m1 = 0.5 * (b.bmm(grad_zi) - yi[:, i, :, :].bmm(grad_yi).bmm(yi[:, i, :, :]) - grad_zi.bmm(ziyi)) grad_yi = grad_yi_m1 grad_zi = grad_zi_m1 grad_a = 0.5 * (grad_yi.bmm(i3 - a) - grad_zi - a.bmm(grad_yi)) x_trace_sqr = x_trace * x_trace grad_atx_trace = (grad_a.transpose(1, 2).bmm(x) 
* identity).sum(dim=(1, 2), keepdim=True) grad_cty_trace = (grad_c.transpose(1, 2).bmm(yn) * identity).sum(dim=(1, 2), keepdim=True) grad_x_extra = (0.5 * grad_cty_trace / x_trace_sqrt - grad_atx_trace / x_trace_sqr).repeat(1, m, m) * identity grad_x = grad_a / x_trace + grad_x_extra return grad_x, None class Triuvec(torch.autograd.Function): """ Extract upper triangular part of matrix into vector form. """ @staticmethod def forward(ctx, x): batch, cols, rows = x.size() assert (cols == rows) n = cols triuvec_inds = torch.ones(n, n).triu().view(n * n).nonzero() # assert (len(triuvec_inds) == n * (n + 1) // 2) x_vec = x.reshape(batch, -1) y = x_vec[:, triuvec_inds] ctx.save_for_backward(x, triuvec_inds) return y @staticmethod def backward(ctx, grad_y): x, triuvec_inds = ctx.saved_tensors batch, n, _ = x.size() grad_x = torch.zeros_like(x).view(batch, -1) grad_x[:, triuvec_inds] = grad_y grad_x = grad_x.view(batch, n, n) return grad_x class iSQRTCOVPool(nn.Module): """ iSQRT-COV pooling layer. Parameters: ---------- num_iter : int, default 5 Number of iterations (num_iter > 1). """ def __init__(self, num_iter=5): super(iSQRTCOVPool, self).__init__() self.num_iter = num_iter self.cov_pool = CovPool.apply self.sqrt = NewtonSchulzSqrt.apply self.triuvec = Triuvec.apply def forward(self, x): x = self.cov_pool(x) x = self.sqrt(x, self.num_iter) x = self.triuvec(x) return x class iSQRTCOVResNet(nn.Module): """ iSQRT-COV-ResNet model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(iSQRTCOVResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i not in [0, len(channels) - 1]) else 1 stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels self.features.add_module("final_pool", iSQRTCOVPool()) in_features = in_channels * (in_channels + 1) // 2 self.output = nn.Linear( in_features=in_features, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_isqrtcovresnet(blocks, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create iSQRT-COV-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported iSQRT-COV-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 final_block_channels = 256 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = iSQRTCOVResNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def isqrtcovresnet18(**kwargs): """ iSQRT-COV-ResNet-18 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=18, model_name="isqrtcovresnet18", **kwargs) def isqrtcovresnet34(**kwargs): """ iSQRT-COV-ResNet-34 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=34, model_name="isqrtcovresnet34", **kwargs) def isqrtcovresnet50(**kwargs): """ iSQRT-COV-ResNet-50 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=50, model_name="isqrtcovresnet50", **kwargs) def isqrtcovresnet50b(**kwargs): """ iSQRT-COV-ResNet-50 model with stride at the second convolution in bottleneck block from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=50, conv1_stride=False, model_name="isqrtcovresnet50b", **kwargs) def isqrtcovresnet101(**kwargs): """ iSQRT-COV-ResNet-101 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_isqrtcovresnet(blocks=101, model_name="isqrtcovresnet101", **kwargs) def isqrtcovresnet101b(**kwargs): """ iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,' https://arxiv.org/abs/1712.01034. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ isqrtcovresnet18, isqrtcovresnet34, isqrtcovresnet50, isqrtcovresnet50b, isqrtcovresnet101, isqrtcovresnet101b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != isqrtcovresnet18 or weight_count == 44205096) assert (model != isqrtcovresnet34 or weight_count == 54313256) assert (model != isqrtcovresnet50 or weight_count == 56929832) assert (model != isqrtcovresnet50b or weight_count == 56929832) assert (model != isqrtcovresnet101 or weight_count == 75921960) assert (model != isqrtcovresnet101b or weight_count == 75921960) x = torch.randn(14, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, 1000)) if __name__ == "__main__": _test()
15,872
33.885714
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/shufflenetv2.py
""" ShuffleNet V2 for ImageNet-1K, implemented in PyTorch. Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. """ __all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock class ShuffleUnit(nn.Module): """ ShuffleNetV2 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether do downsample. use_se : bool Whether to use SE block. use_residual : bool Whether to use residual connection. """ def __init__(self, in_channels, out_channels, downsample, use_se, use_residual): super(ShuffleUnit, self).__init__() self.downsample = downsample self.use_se = use_se self.use_residual = use_residual mid_channels = out_channels // 2 self.compress_conv1 = conv1x1( in_channels=(in_channels if self.downsample else mid_channels), out_channels=mid_channels) self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels) self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, stride=(2 if self.downsample else 1)) self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels) self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=mid_channels) self.expand_bn3 = nn.BatchNorm2d(num_features=mid_channels) if self.use_se: self.se = SEBlock(channels=mid_channels) if downsample: self.dw_conv4 = depthwise_conv3x3( channels=in_channels, stride=2) self.dw_bn4 = nn.BatchNorm2d(num_features=in_channels) self.expand_conv5 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.expand_bn5 = nn.BatchNorm2d(num_features=mid_channels) self.activ = nn.ReLU(inplace=True) self.c_shuffle = ChannelShuffle( channels=out_channels, groups=2) def forward(self, x): if self.downsample: y1 = self.dw_conv4(x) y1 = self.dw_bn4(y1) y1 = self.expand_conv5(y1) y1 = self.expand_bn5(y1) y1 = self.activ(y1) x2 = x else: y1, x2 = torch.chunk(x, chunks=2, dim=1) y2 = self.compress_conv1(x2) y2 = self.compress_bn1(y2) y2 = self.activ(y2) y2 = self.dw_conv2(y2) y2 = self.dw_bn2(y2) y2 = self.expand_conv3(y2) y2 = self.expand_bn3(y2) y2 = self.activ(y2) if self.use_se: y2 = self.se(y2) if self.use_residual and not self.downsample: y2 = y2 + x2 x = torch.cat((y1, y2), dim=1) x = self.c_shuffle(x) return x class ShuffleInitBlock(nn.Module): """ ShuffleNetV2 specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShuffleInitBlock, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=0, ceil_mode=True) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ShuffleNetV2(nn.Module): """ ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. use_se : bool, default False Whether to use SE block. 
use_residual : bool, default False Whether to use residual connections. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, in_channels=3, in_size=(224, 224), num_classes=1000): super(ShuffleNetV2, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) stage.add_module("unit{}".format(j + 1), ShuffleUnit( in_channels=in_channels, out_channels=out_channels, downsample=downsample, use_se=use_se, use_residual=use_residual)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_shufflenetv2(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ShuffleNetV2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 24 final_block_channels = 1024 layers = [4, 8, 4] channels_per_layers = [116, 232, 464] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if width_scale > 1.5: final_block_channels = int(final_block_channels * width_scale) net = ShuffleNetV2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shufflenetv2_wd2(**kwargs): """ ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs) def shufflenetv2_w1(**kwargs): """ ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs) def shufflenetv2_w3d2(**kwargs): """ ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs) def shufflenetv2_w2(**kwargs): """ ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ shufflenetv2_wd2, shufflenetv2_w1, shufflenetv2_w3d2, shufflenetv2_w2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenetv2_wd2 or weight_count == 1366792) assert (model != shufflenetv2_w1 or weight_count == 2278604) assert (model != shufflenetv2_w3d2 or weight_count == 4406098) assert (model != shufflenetv2_w2 or weight_count == 7601686) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
11,722
30.942779
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fishnet.py
""" FishNet for ImageNet-1K, implemented in PyTorch. Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf. """ __all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze'] import os import torch.nn as nn import torch.nn.init as init from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, Identity, InterpolationBlock from .preresnet import PreResActivation from .senet import SEInitBlock def channel_squeeze(x, groups): """ Channel squeeze operation. Parameters: ---------- x : Tensor Input tensor. groups : int Number of groups. Returns: ------- Tensor Resulted tensor. """ batch, channels, height, width = x.size() channels_per_group = channels // groups x = x.view(batch, channels_per_group, groups, height, width).sum(dim=2) return x class ChannelSqueeze(nn.Module): """ Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups. Parameters: ---------- channels : int Number of channels. groups : int Number of groups. """ def __init__(self, channels, groups): super(ChannelSqueeze, self).__init__() if channels % groups != 0: raise ValueError("channels must be divisible by groups") self.groups = groups def forward(self, x): return channel_squeeze(x, self.groups) class PreSEAttBlock(nn.Module): """ FishNet specific Squeeze-and-Excitation attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. reduction : int, default 16 Squeeze reduction value. """ def __init__(self, in_channels, out_channels, reduction=16): super(PreSEAttBlock, self).__init__() mid_cannels = out_channels // reduction self.bn = nn.BatchNorm2d(num_features=in_channels) self.relu = nn.ReLU(inplace=True) self.pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_cannels, bias=True) self.conv2 = conv1x1( in_channels=mid_cannels, out_channels=out_channels, bias=True) self.sigmoid = nn.Sigmoid() def forward(self, x): x = self.bn(x) x = self.relu(x) x = self.pool(x) x = self.conv1(x) x = self.relu(x) x = self.conv2(x) x = self.sigmoid(x) return x class FishBottleneck(nn.Module): """ FishNet bottleneck block for residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int Dilation value for convolution layer. """ def __init__(self, in_channels, out_channels, stride, dilation): super(FishBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, padding=dilation, dilation=dilation) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class FishBlock(nn.Module): """ FishNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. 
squeeze : bool, default False Whether to use a channel squeeze operation. """ def __init__(self, in_channels, out_channels, stride=1, dilation=1, squeeze=False): super(FishBlock, self).__init__() self.squeeze = squeeze self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = FishBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation) if self.squeeze: assert (in_channels // 2 == out_channels) self.c_squeeze = ChannelSqueeze( channels=in_channels, groups=2) elif self.resize_identity: self.identity_conv = pre_conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride) def forward(self, x): if self.squeeze: identity = self.c_squeeze(x) elif self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity return x class DownUnit(nn.Module): """ FishNet down unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of output channels for each block. """ def __init__(self, in_channels, out_channels_list): super(DownUnit, self).__init__() self.blocks = nn.Sequential() for i, out_channels in enumerate(out_channels_list): self.blocks.add_module("block{}".format(i + 1), FishBlock( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels self.pool = nn.MaxPool2d( kernel_size=2, stride=2) def forward(self, x): x = self.blocks(x) x = self.pool(x) return x class UpUnit(nn.Module): """ FishNet up unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of output channels for each block. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. """ def __init__(self, in_channels, out_channels_list, dilation=1): super(UpUnit, self).__init__() self.blocks = nn.Sequential() for i, out_channels in enumerate(out_channels_list): squeeze = (dilation > 1) and (i == 0) self.blocks.add_module("block{}".format(i + 1), FishBlock( in_channels=in_channels, out_channels=out_channels, dilation=dilation, squeeze=squeeze)) in_channels = out_channels self.upsample = InterpolationBlock(scale_factor=2, mode="nearest", align_corners=None) def forward(self, x): x = self.blocks(x) x = self.upsample(x) return x class SkipUnit(nn.Module): """ FishNet skip connection unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of output channels for each block. """ def __init__(self, in_channels, out_channels_list): super(SkipUnit, self).__init__() self.blocks = nn.Sequential() for i, out_channels in enumerate(out_channels_list): self.blocks.add_module("block{}".format(i + 1), FishBlock( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels def forward(self, x): x = self.blocks(x) return x class SkipAttUnit(nn.Module): """ FishNet skip connection unit with attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of output channels for each block. 
""" def __init__(self, in_channels, out_channels_list): super(SkipAttUnit, self).__init__() mid_channels1 = in_channels // 2 mid_channels2 = 2 * in_channels self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels1) self.conv2 = pre_conv1x1_block( in_channels=mid_channels1, out_channels=mid_channels2, bias=True) in_channels = mid_channels2 self.se = PreSEAttBlock( in_channels=mid_channels2, out_channels=out_channels_list[-1]) self.blocks = nn.Sequential() for i, out_channels in enumerate(out_channels_list): self.blocks.add_module("block{}".format(i + 1), FishBlock( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels def forward(self, x): x = self.conv1(x) x = self.conv2(x) w = self.se(x) x = self.blocks(x) x = x * w + w return x class FishFinalBlock(nn.Module): """ FishNet final block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(FishFinalBlock, self).__init__() mid_channels = in_channels // 2 self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.preactiv = PreResActivation( in_channels=mid_channels) def forward(self, x): x = self.conv1(x) x = self.preactiv(x) return x class FishNet(nn.Module): """ FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf. Parameters: ---------- direct_channels : list of list of list of int Number of output channels for each unit along the straight path. skip_channels : list of list of list of int Number of output channels for each skip connection unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, direct_channels, skip_channels, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(FishNet, self).__init__() self.in_size = in_size self.num_classes = num_classes depth = len(direct_channels[0]) down1_channels = direct_channels[0] up_channels = direct_channels[1] down2_channels = direct_channels[2] skip1_channels = skip_channels[0] skip2_channels = skip_channels[1] self.features = nn.Sequential() self.features.add_module("init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels down1_seq = nn.Sequential() skip1_seq = nn.Sequential() for i in range(depth + 1): skip1_channels_list = skip1_channels[i] if i < depth: skip1_seq.add_module("unit{}".format(i + 1), SkipUnit( in_channels=in_channels, out_channels_list=skip1_channels_list)) down1_channels_list = down1_channels[i] down1_seq.add_module("unit{}".format(i + 1), DownUnit( in_channels=in_channels, out_channels_list=down1_channels_list)) in_channels = down1_channels_list[-1] else: skip1_seq.add_module("unit{}".format(i + 1), SkipAttUnit( in_channels=in_channels, out_channels_list=skip1_channels_list)) in_channels = skip1_channels_list[-1] up_seq = nn.Sequential() skip2_seq = nn.Sequential() for i in range(depth + 1): skip2_channels_list = skip2_channels[i] if i > 0: in_channels += skip1_channels[depth - i][-1] if i < depth: skip2_seq.add_module("unit{}".format(i + 1), SkipUnit( in_channels=in_channels, out_channels_list=skip2_channels_list)) up_channels_list = up_channels[i] dilation = 2 ** i up_seq.add_module("unit{}".format(i + 1), UpUnit( in_channels=in_channels, out_channels_list=up_channels_list, dilation=dilation)) in_channels = up_channels_list[-1] else: skip2_seq.add_module("unit{}".format(i + 1), Identity()) down2_seq = nn.Sequential() for i in range(depth): down2_channels_list = down2_channels[i] down2_seq.add_module("unit{}".format(i + 1), DownUnit( in_channels=in_channels, out_channels_list=down2_channels_list)) in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1] self.features.add_module("hg", SesquialteralHourglass( down1_seq=down1_seq, skip1_seq=skip1_seq, up_seq=up_seq, skip2_seq=skip2_seq, down2_seq=down2_seq)) self.features.add_module("final_block", FishFinalBlock(in_channels=in_channels)) in_channels = in_channels // 2 self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Sequential() self.output.add_module("final_conv", conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_fishnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create FishNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 99: direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]] skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]] elif blocks == 150: direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]] skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]] else: raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks)) direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]] skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]] direct_channels = [[[b] * c for (b, c) in zip(*a)] for a in ([(ci, li) for (ci, li) in zip(direct_channels_per_layers, direct_layers)])] skip_channels = [[[b] * c for (b, c) in zip(*a)] for a in ([(ci, li) for (ci, li) in zip(skip_channels_per_layers, skip_layers)])] init_block_channels = 64 net = FishNet( direct_channels=direct_channels, skip_channels=skip_channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fishnet99(**kwargs): """ FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fishnet(blocks=99, model_name="fishnet99", **kwargs) def fishnet150(**kwargs): """ FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_fishnet(blocks=150, model_name="fishnet150", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ fishnet99, fishnet150, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fishnet99 or weight_count == 16628904) assert (model != fishnet150 or weight_count == 24959400) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
19,302
30.033762
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/hrnet.py
""" HRNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. """ __all__ = ['hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32', 'hrnetv2_w40', 'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64'] import os import torch.nn as nn from .common import conv1x1_block, conv3x3_block, Identity from .resnet import ResUnit class UpSamplingBlock(nn.Module): """ HFNet specific upsampling block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : int Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, scale_factor): super(UpSamplingBlock, self).__init__() self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=1, activation=None) self.upsample = nn.Upsample( scale_factor=scale_factor, mode="nearest") def forward(self, x): x = self.conv(x) x = self.upsample(x) return x class HRBlock(nn.Module): """ HFNet block. Parameters: ---------- in_channels_list : list of int Number of input channels. out_channels_list : list of int Number of output channels. num_branches : int Number of branches. num_subblocks : list of int Number of subblock. """ def __init__(self, in_channels_list, out_channels_list, num_branches, num_subblocks): super(HRBlock, self).__init__() self.in_channels_list = in_channels_list self.num_branches = num_branches self.branches = nn.Sequential() for i in range(num_branches): layers = nn.Sequential() in_channels_i = self.in_channels_list[i] out_channels_i = out_channels_list[i] for j in range(num_subblocks[i]): layers.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels_i, out_channels=out_channels_i, stride=1, bottleneck=False)) in_channels_i = out_channels_i self.in_channels_list[i] = out_channels_i self.branches.add_module("branch{}".format(i + 1), layers) if num_branches > 1: self.fuse_layers = nn.Sequential() for i in range(num_branches): fuse_layer = nn.Sequential() for j in range(num_branches): if j > i: fuse_layer.add_module("block{}".format(j + 1), UpSamplingBlock( in_channels=in_channels_list[j], out_channels=in_channels_list[i], scale_factor=2 ** (j - i))) elif j == i: fuse_layer.add_module("block{}".format(j + 1), Identity()) else: conv3x3_seq = nn.Sequential() for k in range(i - j): if k == i - j - 1: conv3x3_seq.add_module("subblock{}".format(k + 1), conv3x3_block( in_channels=in_channels_list[j], out_channels=in_channels_list[i], stride=2, activation=None)) else: conv3x3_seq.add_module("subblock{}".format(k + 1), conv3x3_block( in_channels=in_channels_list[j], out_channels=in_channels_list[j], stride=2)) fuse_layer.add_module("block{}".format(j + 1), conv3x3_seq) self.fuse_layers.add_module("layer{}".format(i + 1), fuse_layer) self.activ = nn.ReLU(True) def forward(self, x): for i in range(self.num_branches): x[i] = self.branches[i](x[i]) if self.num_branches == 1: return x x_fuse = [] for i in range(len(self.fuse_layers)): y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) for j in range(1, self.num_branches): if i == j: y = y + x[j] else: y = y + self.fuse_layers[i][j](x[j]) x_fuse.append(self.activ(y)) return x_fuse class HRStage(nn.Module): """ HRNet stage block. Parameters: ---------- in_channels_list : list of int Number of output channels from the previous layer. out_channels_list : list of int Number of output channels in the current layer. num_modules : int Number of modules. 
num_branches : int Number of branches. num_subblocks : list of int Number of subblocks. """ def __init__(self, in_channels_list, out_channels_list, num_modules, num_branches, num_subblocks): super(HRStage, self).__init__() self.branches = num_branches self.in_channels_list = out_channels_list in_branches = len(in_channels_list) out_branches = len(out_channels_list) self.transition = nn.Sequential() for i in range(out_branches): if i < in_branches: if out_channels_list[i] != in_channels_list[i]: self.transition.add_module("block{}".format(i + 1), conv3x3_block( in_channels=in_channels_list[i], out_channels=out_channels_list[i], stride=1)) else: self.transition.add_module("block{}".format(i + 1), Identity()) else: conv3x3_seq = nn.Sequential() for j in range(i + 1 - in_branches): in_channels_i = in_channels_list[-1] out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i conv3x3_seq.add_module("subblock{}".format(j + 1), conv3x3_block( in_channels=in_channels_i, out_channels=out_channels_i, stride=2)) self.transition.add_module("block{}".format(i + 1), conv3x3_seq) self.layers = nn.Sequential() for i in range(num_modules): self.layers.add_module("block{}".format(i + 1), HRBlock( in_channels_list=self.in_channels_list, out_channels_list=out_channels_list, num_branches=num_branches, num_subblocks=num_subblocks)) self.in_channels_list = self.layers[-1].in_channels_list def forward(self, x): x_list = [] for j in range(self.branches): if not isinstance(self.transition[j], Identity): x_list.append(self.transition[j](x[-1] if type(x) is list else x)) else: x_list_j = x[j] if type(x) is list else x x_list.append(x_list_j) y_list = self.layers(x_list) return y_list class HRInitBlock(nn.Module): """ HRNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. num_subblocks : int Number of subblocks. """ def __init__(self, in_channels, out_channels, mid_channels, num_subblocks): super(HRInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=2) in_channels = mid_channels self.subblocks = nn.Sequential() for i in range(num_subblocks): self.subblocks.add_module("block{}".format(i + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=1, bottleneck=True)) in_channels = out_channels def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.subblocks(x) return x class HRFinalBlock(nn.Module): """ HRNet specific final block. Parameters: ---------- in_channels_list : list of int Number of input channels per stage. out_channels_list : list of int Number of output channels per stage. 
""" def __init__(self, in_channels_list, out_channels_list): super(HRFinalBlock, self).__init__() self.inc_blocks = nn.Sequential() for i, in_channels_i in enumerate(in_channels_list): self.inc_blocks.add_module("block{}".format(i + 1), ResUnit( in_channels=in_channels_i, out_channels=out_channels_list[i], stride=1, bottleneck=True)) self.down_blocks = nn.Sequential() for i in range(len(in_channels_list) - 1): self.down_blocks.add_module("block{}".format(i + 1), conv3x3_block( in_channels=out_channels_list[i], out_channels=out_channels_list[i + 1], stride=2, bias=True)) self.final_layer = conv1x1_block( in_channels=1024, out_channels=2048, stride=1, bias=True) def forward(self, x): y = self.inc_blocks[0](x[0]) for i in range(len(self.down_blocks)): y = self.inc_blocks[i + 1](x[i + 1]) + self.down_blocks[i](y) y = self.final_layer(y) return y class HRNet(nn.Module): """ HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- channels : list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. init_num_subblocks : int Number of subblocks in the initial unit. num_modules : int Number of modules per stage. num_subblocks : list of int Number of subblocks per stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, init_num_subblocks, num_modules, num_subblocks, in_channels=3, in_size=(224, 224), num_classes=1000): super(HRNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.branches = [2, 3, 4] self.features = nn.Sequential() self.features.add_module("init_block", HRInitBlock( in_channels=in_channels, out_channels=init_block_channels, mid_channels=64, num_subblocks=init_num_subblocks)) in_channels_list = [init_block_channels] for i in range(len(self.branches)): self.features.add_module("stage{}".format(i + 1), HRStage( in_channels_list=in_channels_list, out_channels_list=channels[i], num_modules=num_modules[i], num_branches=self.branches[i], num_subblocks=num_subblocks[i])) in_channels_list = self.features[-1].in_channels_list self.features.add_module("final_block", HRFinalBlock( in_channels_list=in_channels_list, out_channels_list=[128, 256, 512, 1024])) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=2048, out_features=num_classes) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, nn.BatchNorm2d): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_hrnet(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create HRNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('s' or 'm'). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "w18s1": init_block_channels = 128 init_num_subblocks = 1 channels = [[16, 32], [16, 32, 64], [16, 32, 64, 128]] num_modules = [1, 1, 1] elif version == "w18s2": init_block_channels = 256 init_num_subblocks = 2 channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]] num_modules = [1, 3, 2] elif version == "w18": init_block_channels = 256 init_num_subblocks = 4 channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]] num_modules = [1, 4, 3] elif version == "w30": init_block_channels = 256 init_num_subblocks = 4 channels = [[30, 60], [30, 60, 120], [30, 60, 120, 240]] num_modules = [1, 4, 3] elif version == "w32": init_block_channels = 256 init_num_subblocks = 4 channels = [[32, 64], [32, 64, 128], [32, 64, 128, 256]] num_modules = [1, 4, 3] elif version == "w40": init_block_channels = 256 init_num_subblocks = 4 channels = [[40, 80], [40, 80, 160], [40, 80, 160, 320]] num_modules = [1, 4, 3] elif version == "w44": init_block_channels = 256 init_num_subblocks = 4 channels = [[44, 88], [44, 88, 176], [44, 88, 176, 352]] num_modules = [1, 4, 3] elif version == "w48": init_block_channels = 256 init_num_subblocks = 4 channels = [[48, 96], [48, 96, 192], [48, 96, 192, 384]] num_modules = [1, 4, 3] elif version == "w64": init_block_channels = 256 init_num_subblocks = 4 channels = [[64, 128], [64, 128, 256], [64, 128, 256, 512]] num_modules = [1, 4, 3] else: raise ValueError("Unsupported HRNet version {}".format(version)) num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels] net = HRNet( channels=channels, init_block_channels=init_block_channels, init_num_subblocks=init_num_subblocks, num_modules=num_modules, num_subblocks=num_subblocks, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def hrnet_w18_small_v1(**kwargs): """ HRNet-W18 Small V1 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w18s1", model_name="hrnet_w18_small_v1", **kwargs) def hrnet_w18_small_v2(**kwargs): """ HRNet-W18 Small V2 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w18s2", model_name="hrnet_w18_small_v2", **kwargs) def hrnetv2_w18(**kwargs): """ HRNetV2-W18 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w18", model_name="hrnetv2_w18", **kwargs) def hrnetv2_w30(**kwargs): """ HRNetV2-W30 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w30", model_name="hrnetv2_w30", **kwargs) def hrnetv2_w32(**kwargs): """ HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs) def hrnetv2_w40(**kwargs): """ HRNetV2-W40 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w40", model_name="hrnetv2_w40", **kwargs) def hrnetv2_w44(**kwargs): """ HRNetV2-W44 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w44", model_name="hrnetv2_w44", **kwargs) def hrnetv2_w48(**kwargs): """ HRNetV2-W48 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hrnet(version="w48", model_name="hrnetv2_w48", **kwargs) def hrnetv2_w64(**kwargs): """ HRNetV2-W64 model from 'Deep High-Resolution Representation Learning for Visual Recognition,' https://arxiv.org/abs/1908.07919. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_hrnet(version="w64", model_name="hrnetv2_w64", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ hrnet_w18_small_v1, hrnet_w18_small_v2, hrnetv2_w18, hrnetv2_w30, hrnetv2_w32, hrnetv2_w40, hrnetv2_w44, hrnetv2_w48, hrnetv2_w64, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != hrnet_w18_small_v1 or weight_count == 13187464) assert (model != hrnet_w18_small_v2 or weight_count == 15597464) assert (model != hrnetv2_w18 or weight_count == 21299004) assert (model != hrnetv2_w30 or weight_count == 37712220) assert (model != hrnetv2_w32 or weight_count == 41232680) assert (model != hrnetv2_w40 or weight_count == 57557160) assert (model != hrnetv2_w44 or weight_count == 67064984) assert (model != hrnetv2_w48 or weight_count == 77469864) assert (model != hrnetv2_w64 or weight_count == 128059944) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
22,226
32.83105
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fcn8sd.py
""" FCN-8s(d) for image segmentation, implemented in PyTorch. Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. """ __all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco', 'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k', 'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes'] import os import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import conv1x1, conv3x3_block from .resnetd import resnetd50b, resnetd101b class FCNFinalBlock(nn.Module): """ FCN-8s(d) final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, bottleneck_factor=4): super(FCNFinalBlock, self).__init__() assert (in_channels % bottleneck_factor == 0) mid_channels = in_channels // bottleneck_factor self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x, out_size): x = self.conv1(x) x = self.dropout(x) x = self.conv2(x) x = F.interpolate(x, size=out_size, mode="bilinear", align_corners=True) return x class FCN8sd(nn.Module): """ FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. It is an experimental model mixed FCN-8s and PSPNet. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. num_classes : int, default 21 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21): super(FCN8sd, self).__init__() assert (in_channels > 0) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.backbone = backbone pool_out_channels = backbone_out_channels self.final_block = FCNFinalBlock( in_channels=pool_out_channels, out_channels=num_classes) if self.aux: aux_out_channels = backbone_out_channels // 2 self.aux_block = FCNFinalBlock( in_channels=aux_out_channels, out_channels=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, y = self.backbone(x) x = self.final_block(x, in_size) if self.aux: y = self.aux_block(y, in_size) return x, y else: return x def get_fcn8sd(backbone, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create FCN-8s(d) model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. num_classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = FCN8sd( backbone=backbone, num_classes=num_classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fcn8sd_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-50b for Pascal VOC from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd50b_voc", **kwargs) def fcn8sd_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-101b for Pascal VOC from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd101b_voc", **kwargs) def fcn8sd_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-50b for COCO from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd50b_coco", **kwargs) def fcn8sd_resnetd101b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-101b for COCO from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 21 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd101b_coco", **kwargs) def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-50b for ADE20K from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd50b_ade20k", **kwargs) def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-101b for ADE20K from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 150 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd101b_ade20k", **kwargs) def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-50b for Cityscapes from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd50b_cityscapes", **kwargs) def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ FCN-8s(d) model on the base of ResNet(D)-101b for Cityscapes from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name="fcn8sd_resnetd101b_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (480, 480) aux = True pretrained = False models = [ (fcn8sd_resnetd50b_voc, 21), (fcn8sd_resnetd101b_voc, 21), (fcn8sd_resnetd50b_coco, 21), (fcn8sd_resnetd101b_coco, 21), (fcn8sd_resnetd50b_ade20k, 150), (fcn8sd_resnetd101b_ade20k, 150), (fcn8sd_resnetd50b_cityscapes, 19), (fcn8sd_resnetd101b_cityscapes, 19), ] for model, num_classes in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != fcn8sd_resnetd50b_voc or weight_count == 35445994) assert (model != fcn8sd_resnetd101b_voc or weight_count == 54438122) assert (model != fcn8sd_resnetd50b_coco or weight_count == 35445994) assert (model != fcn8sd_resnetd101b_coco or weight_count == 54438122) assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 35545324) assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 54537452) assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 35444454) assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 54436582) else: assert (model != fcn8sd_resnetd50b_voc or weight_count == 33080789) assert (model != fcn8sd_resnetd101b_voc or weight_count == 52072917) assert (model != fcn8sd_resnetd50b_coco or weight_count == 33080789) assert (model != fcn8sd_resnetd101b_coco or weight_count == 52072917) assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 33146966) assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 52139094) assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 33079763) assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 52071891) x = torch.randn(1, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3))) if __name__ == "__main__": _test()
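A sketch of the segmentation interface (import path taken from the file location above): with aux=True the forward pass returns a (main, auxiliary) pair, both already interpolated back to the input resolution by FCNFinalBlock.

import torch

from pytorchcv.models.fcn8sd import fcn8sd_resnetd50b_voc

net = fcn8sd_resnetd50b_voc(pretrained=False, aux=True)
net.eval()
with torch.no_grad():
    out, aux_out = net(torch.randn(1, 3, 480, 480))
mask = out.argmax(dim=1)  # per-pixel class indices
print(out.shape)   # torch.Size([1, 21, 480, 480])
print(mask.shape)  # torch.Size([1, 480, 480])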
16,126
37.125296
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/selecsls.py
""" SelecSLS for ImageNet-1K, implemented in PyTorch. Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. """ __all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, DualPathSequential class SelecSLSBlock(nn.Module): """ SelecSLS block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(SelecSLSBlock, self).__init__() mid_channels = 2 * out_channels self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class SelecSLSUnit(nn.Module): """ SelecSLS unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. skip_channels : int Number of skipped channels. mid_channels : int Number of middle channels. stride : int or tuple/list of 2 int Strides of the branch convolution layers. """ def __init__(self, in_channels, out_channels, skip_channels, mid_channels, stride): super(SelecSLSUnit, self).__init__() self.resize = (stride == 2) mid2_channels = mid_channels // 2 last_channels = 2 * mid_channels + (skip_channels if stride == 1 else 0) self.branch1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.branch2 = SelecSLSBlock( in_channels=mid_channels, out_channels=mid2_channels) self.branch3 = SelecSLSBlock( in_channels=mid2_channels, out_channels=mid2_channels) self.last_conv = conv1x1_block( in_channels=last_channels, out_channels=out_channels) def forward(self, x, x0): x1 = self.branch1(x) x2 = self.branch2(x1) x3 = self.branch3(x2) if self.resize: y = torch.cat((x1, x2, x3), dim=1) y = self.last_conv(y) return y, y else: y = torch.cat((x1, x2, x3, x0), dim=1) y = self.last_conv(y) return y, x0 class SelecSLS(nn.Module): """ SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- channels : list of list of int Number of output channels for each unit. skip_channels : list of list of int Number of skipped channels for each unit. mid_channels : list of list of int Number of middle channels for each unit. kernels3 : list of list of int/bool Using 3x3 (instead of 1x1) kernel for each head unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, skip_channels, mid_channels, kernels3, in_channels=3, in_size=(224, 224), num_classes=1000): super(SelecSLS, self).__init__() self.in_size = in_size self.num_classes = num_classes init_block_channels = 32 self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=(1 + len(kernels3))) self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): k = i - len(skip_channels) stage = DualPathSequential() if k < 0 else nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if j == 0 else 1 if k < 0: unit = SelecSLSUnit( in_channels=in_channels, out_channels=out_channels, skip_channels=skip_channels[i][j], mid_channels=mid_channels[i][j], stride=stride) else: conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block unit = conv_block_class( in_channels=in_channels, out_channels=out_channels, stride=stride) stage.add_module("unit{}".format(j + 1), unit) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=4, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, nn.BatchNorm2d): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_selecsls(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SelecSLS model with specific parameters. Parameters: ---------- version : str Version of SelecSLS. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if version in ("42", "42b"): channels = [[64, 128], [144, 288], [304, 480]] skip_channels = [[0, 64], [0, 144], [0, 304]] mid_channels = [[64, 64], [144, 144], [304, 304]] kernels3 = [[1, 1], [1, 0]] if version == "42": head_channels = [[960, 1024], [1024, 1280]] else: head_channels = [[960, 1024], [1280, 1024]] elif version in ("60", "60b"): channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]] skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]] mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]] kernels3 = [[1, 1], [1, 0]] if version == "60": head_channels = [[756, 1024], [1024, 1280]] else: head_channels = [[756, 1024], [1280, 1024]] elif version == "84": channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]] skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]] mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]] kernels3 = [[1, 1], [1, 1]] head_channels = [[960, 1024], [1024, 1280]] else: raise ValueError("Unsupported SelecSLS version {}".format(version)) channels += head_channels net = SelecSLS( channels=channels, skip_channels=skip_channels, mid_channels=mid_channels, kernels3=kernels3, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def selecsls42(**kwargs): """ SelecSLS-42 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_selecsls(version="42", model_name="selecsls42", **kwargs) def selecsls42b(**kwargs): """ SelecSLS-42b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_selecsls(version="42b", model_name="selecsls42b", **kwargs) def selecsls60(**kwargs): """ SelecSLS-60 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_selecsls(version="60", model_name="selecsls60", **kwargs) def selecsls60b(**kwargs): """ SelecSLS-60b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_selecsls(version="60b", model_name="selecsls60b", **kwargs) def selecsls84(**kwargs): """ SelecSLS-84 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_selecsls(version="84", model_name="selecsls84", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ selecsls42, selecsls42b, selecsls60, selecsls60b, selecsls84, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != selecsls42 or weight_count == 30354952) assert (model != selecsls42b or weight_count == 32458248) assert (model != selecsls60 or weight_count == 30670768) assert (model != selecsls60b or weight_count == 32774064) assert (model != selecsls84 or weight_count == 50954600) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
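A sketch of the dual-path contract of SelecSLSUnit (instantiated directly here for illustration; inside the model it lives in a DualPathSequential): a stride-2 unit opens a new skip stream and returns (y, y), while a stride-1 unit concatenates the carried stream x0 and passes it through unchanged.

import torch

from pytorchcv.models.selecsls import SelecSLSUnit

# A stride-2 unit ignores the incoming skip stream, so x0 may be None.
unit = SelecSLSUnit(in_channels=32, out_channels=64, skip_channels=0,
                    mid_channels=64, stride=2)
unit.eval()
with torch.no_grad():
    y, skip = unit(torch.randn(1, 32, 56, 56), None)
print(y.shape)    # torch.Size([1, 64, 28, 28])
print(y is skip)  # True -> the output seeds the skip stream for later units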
12,347
31.580475
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/inceptionv4.py
""" InceptionV4 for ImageNet-1K, implemented in PyTorch. Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. """ __all__ = ['InceptionV4', 'inceptionv4'] import os import torch import torch.nn as nn from .common import ConvBlock, conv3x3_block, Concurrent from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch class Conv3x3Branch(nn.Module): """ InceptionV4 specific convolutional 3x3 branch block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(Conv3x3Branch, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, padding=0, bn_eps=bn_eps) def forward(self, x): x = self.conv(x) return x class ConvSeq3x3Branch(nn.Module): """ InceptionV4 specific convolutional sequence branch block with splitting by 3x3. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels_list : list of tuple of int List of numbers of output channels for middle layers. kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int List of convolution window sizes. strides_list : list of tuple of int or tuple of tuple/list of 2 int List of strides of the convolution. padding_list : list of tuple of int or tuple of tuple/list of 2 int List of padding values for convolution layers. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, mid_channels_list, kernel_size_list, strides_list, padding_list, bn_eps): super(ConvSeq3x3Branch, self).__init__() self.conv_list = nn.Sequential() for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip( mid_channels_list, kernel_size_list, strides_list, padding_list)): self.conv_list.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=strides, padding=padding, bn_eps=bn_eps)) in_channels = mid_channels self.conv1x3 = ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 3), stride=1, padding=(0, 1), bn_eps=bn_eps) self.conv3x1 = ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 1), stride=1, padding=(1, 0), bn_eps=bn_eps) def forward(self, x): x = self.conv_list(x) y1 = self.conv1x3(x) y2 = self.conv3x1(x) x = torch.cat((y1, y2), dim=1) return x class InceptionAUnit(nn.Module): """ InceptionV4 type Inception-A unit. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, bn_eps): super(InceptionAUnit, self).__init__() in_channels = 384 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=96, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=96, bn_eps=bn_eps, count_include_pad=False)) def forward(self, x): x = self.branches(x) return x class ReductionAUnit(nn.Module): """ InceptionV4 type Reduction-A unit. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(ReductionAUnit, self).__init__() in_channels = 384 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(3,), strides_list=(2,), padding_list=(0,), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), bn_eps=bn_eps)) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptionBUnit(nn.Module): """ InceptionV4 type Inception-B unit. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptionBUnit, self).__init__() in_channels = 1024 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=384, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 224, 256), kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 192, 224, 224, 256), kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)), strides_list=(1, 1, 1, 1, 1), padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=128, bn_eps=bn_eps, count_include_pad=False)) def forward(self, x): x = self.branches(x) return x class ReductionBUnit(nn.Module): """ InceptionV4 type Reduction-B unit. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(ReductionBUnit, self).__init__() in_channels = 1024 self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=(192, 192), kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(256, 256, 320, 320), kernel_size_list=(1, (1, 7), (7, 1), 3), strides_list=(1, 1, 1, 2), padding_list=(0, (0, 3), (3, 0), 0), bn_eps=bn_eps)) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptionCUnit(nn.Module): """ InceptionV4 type Inception-C unit. 
Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptionCUnit, self).__init__() in_channels = 1536 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=256, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeq3x3Branch( in_channels=in_channels, out_channels=256, mid_channels_list=(384,), kernel_size_list=(1,), strides_list=(1,), padding_list=(0,), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeq3x3Branch( in_channels=in_channels, out_channels=256, mid_channels_list=(384, 448, 512), kernel_size_list=(1, (3, 1), (1, 3)), strides_list=(1, 1, 1), padding_list=(0, (1, 0), (0, 1)), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=256, bn_eps=bn_eps, count_include_pad=False)) def forward(self, x): x = self.branches(x) return x class InceptBlock3a(nn.Module): """ InceptionV4 type Mixed-3a block. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptBlock3a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", MaxPoolBranch()) self.branches.add_module("branch2", Conv3x3Branch( in_channels=64, out_channels=96, bn_eps=bn_eps)) def forward(self, x): x = self.branches(x) return x class InceptBlock4a(nn.Module): """ InceptionV4 type Mixed-4a block. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptBlock4a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=160, out_channels_list=(64, 96), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 0), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=160, out_channels_list=(64, 64, 64, 96), kernel_size_list=(1, (1, 7), (7, 1), 3), strides_list=(1, 1, 1, 1), padding_list=(0, (0, 3), (3, 0), 0), bn_eps=bn_eps)) def forward(self, x): x = self.branches(x) return x class InceptBlock5a(nn.Module): """ InceptionV4 type Mixed-5a block. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptBlock5a, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", Conv3x3Branch( in_channels=192, out_channels=192, bn_eps=bn_eps)) self.branches.add_module("branch2", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptInitBlock(nn.Module): """ InceptionV4 specific initial block. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, bn_eps): super(InceptInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0, bn_eps=bn_eps) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, stride=1, padding=0, bn_eps=bn_eps) self.conv3 = conv3x3_block( in_channels=32, out_channels=64, stride=1, padding=1, bn_eps=bn_eps) self.block1 = InceptBlock3a(bn_eps=bn_eps) self.block2 = InceptBlock4a(bn_eps=bn_eps) self.block3 = InceptBlock5a(bn_eps=bn_eps) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.block1(x) x = self.block2(x) x = self.block3(x) return x class InceptionV4(nn.Module): """ InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, dropout_rate=0.0, bn_eps=1e-5, in_channels=3, in_size=(299, 299), num_classes=1000): super(InceptionV4, self).__init__() self.in_size = in_size self.num_classes = num_classes layers = [4, 8, 4] normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = nn.Sequential() self.features.add_module("init_block", InceptInitBlock( in_channels=in_channels, bn_eps=bn_eps)) for i, layers_per_stage in enumerate(layers): stage = nn.Sequential() for j in range(layers_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] else: unit = normal_units[i] stage.add_module("unit{}".format(j + 1), unit(bn_eps=bn_eps)) self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Sequential() if dropout_rate > 0.0: self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=1536, out_features=num_classes)) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_inceptionv4(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create InceptionV4 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = InceptionV4(**kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def inceptionv4(**kwargs): """ InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. 
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_inceptionv4(model_name="inceptionv4", bn_eps=1e-3, **kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import torch

    pretrained = False

    models = [
        inceptionv4,
    ]

    for model in models:

        net = model(pretrained=pretrained)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Compare against the factory function (the loop variable is the
        # function, not the InceptionV4 class), as in the sibling test scripts.
        assert (model != inceptionv4 or weight_count == 42679816)

        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
17,876
28.944724
115
py
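For reference, a minimal usage sketch for the inceptionv4 module above. It assumes the pytorchcv pip package (this repo's distribution) is installed, and mirrors the 299x299 input size and 1000-class output exercised by the file's own _test().

# Minimal inference sketch, assuming `pip install pytorchcv`.
import torch
from pytorchcv.model_provider import get_model

net = get_model("inceptionv4", pretrained=False)
net.eval()
x = torch.randn(1, 3, 299, 299)  # InceptionV4 expects 299x299 inputs
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 1000])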
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/regnet.py
""" RegNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. """ __all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040', 'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004', 'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080', 'regnety120', 'regnety160', 'regnety320'] import os import numpy as np import torch.nn as nn from .common import conv1x1_block, conv3x3_block, SEBlock class RegNetBottleneck(nn.Module): """ RegNet bottleneck block for residual path in RegNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. groups : int Number of groups. use_se : bool Whether to use SE-module. bottleneck_factor : int, default 1 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, groups, use_se, bottleneck_factor=1): super(RegNetBottleneck, self).__init__() self.use_se = use_se mid_channels = out_channels // bottleneck_factor mid_groups = mid_channels // groups self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, groups=mid_groups) if self.use_se: self.se = SEBlock( channels=mid_channels, mid_channels=(in_channels // 4)) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_se: x = self.se(x) x = self.conv3(x) return x class RegNetUnit(nn.Module): """ RegNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. groups : int Number of groups. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, stride, groups, use_se): super(RegNetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = RegNetBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, groups=groups, use_se=use_se) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class RegNet(nn.Module): """ RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : list of int Number of groups for each stage. use_se : bool Whether to use SE-module. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, groups, use_se, in_channels=3, in_size=(224, 224), num_classes=1000): super(RegNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, padding=1)) in_channels = init_block_channels for i, (channels_per_stage, groups_per_stage) in enumerate(zip(channels, groups)): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 stage.add_module("unit{}".format(j + 1), RegNetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, groups=groups_per_stage, use_se=use_se)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_regnet(channels_init, channels_slope, channels_mult, depth, groups, use_se=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create RegNet model with specific parameters. Parameters: ---------- channels_init : float Initial value for channels/widths. channels_slope : float Slope value for channels/widths. width_mult : float Width multiplier value. groups : int Number of groups. depth : int Depth value. use_se : bool, default False Whether to use SE-module. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" divisor = 8 assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0) # Generate continuous per-block channels/widths: channels_cont = np.arange(depth) * channels_slope + channels_init # Generate quantized per-block channels/widths: channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult)) channels = channels_init * np.power(channels_mult, channels_exps) channels = (np.round(channels / divisor) * divisor).astype(np.int) # Generate per stage channels/widths and layers/depths: channels_per_stage, layers = np.unique(channels, return_counts=True) # Adjusts the compatibility of channels/widths and groups: groups_per_stage = [min(groups, c) for c in channels_per_stage] channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)] channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)] init_block_channels = 32 net = RegNet( channels=channels, init_block_channels=init_block_channels, groups=groups_per_stage, use_se=use_se, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def regnetx002(**kwargs): """ RegNetX-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8, model_name="regnetx002", **kwargs) def regnetx004(**kwargs): """ RegNetX-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=24, channels_slope=24.48, channels_mult=2.54, depth=22, groups=16, model_name="regnetx004", **kwargs) def regnetx006(**kwargs): """ RegNetX-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=48, channels_slope=36.97, channels_mult=2.24, depth=16, groups=24, model_name="regnetx006", **kwargs) def regnetx008(**kwargs): """ RegNetX-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=56, channels_slope=35.73, channels_mult=2.28, depth=16, groups=16, model_name="regnetx008", **kwargs) def regnetx016(**kwargs): """ RegNetX-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_regnet(channels_init=80, channels_slope=34.01, channels_mult=2.25, depth=18, groups=24, model_name="regnetx016", **kwargs) def regnetx032(**kwargs): """ RegNetX-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=88, channels_slope=26.31, channels_mult=2.25, depth=25, groups=48, model_name="regnetx032", **kwargs) def regnetx040(**kwargs): """ RegNetX-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=96, channels_slope=38.65, channels_mult=2.43, depth=23, groups=40, model_name="regnetx040", **kwargs) def regnetx064(**kwargs): """ RegNetX-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=184, channels_slope=60.83, channels_mult=2.07, depth=17, groups=56, model_name="regnetx064", **kwargs) def regnetx080(**kwargs): """ RegNetX-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=80, channels_slope=49.56, channels_mult=2.88, depth=23, groups=120, model_name="regnetx080", **kwargs) def regnetx120(**kwargs): """ RegNetX-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112, model_name="regnetx120", **kwargs) def regnetx160(**kwargs): """ RegNetX-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=216, channels_slope=55.59, channels_mult=2.1, depth=22, groups=128, model_name="regnetx160", **kwargs) def regnetx320(**kwargs): """ RegNetX-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=320, channels_slope=69.86, channels_mult=2.0, depth=23, groups=168, model_name="regnetx320", **kwargs) def regnety002(**kwargs): """ RegNetY-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8, use_se=True, model_name="regnety002", **kwargs) def regnety004(**kwargs): """ RegNetY-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=48, channels_slope=27.89, channels_mult=2.09, depth=16, groups=8, use_se=True, model_name="regnety004", **kwargs) def regnety006(**kwargs): """ RegNetY-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=48, channels_slope=32.54, channels_mult=2.32, depth=15, groups=16, use_se=True, model_name="regnety006", **kwargs) def regnety008(**kwargs): """ RegNetY-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=56, channels_slope=38.84, channels_mult=2.4, depth=14, groups=16, use_se=True, model_name="regnety008", **kwargs) def regnety016(**kwargs): """ RegNetY-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=48, channels_slope=20.71, channels_mult=2.65, depth=27, groups=24, use_se=True, model_name="regnety016", **kwargs) def regnety032(**kwargs): """ RegNetY-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=80, channels_slope=42.63, channels_mult=2.66, depth=21, groups=24, use_se=True, model_name="regnety032", **kwargs) def regnety040(**kwargs): """ RegNetY-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=96, channels_slope=31.41, channels_mult=2.24, depth=22, groups=64, use_se=True, model_name="regnety040", **kwargs) def regnety064(**kwargs): """ RegNetY-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_regnet(channels_init=112, channels_slope=33.22, channels_mult=2.27, depth=25, groups=72, use_se=True, model_name="regnety064", **kwargs) def regnety080(**kwargs): """ RegNetY-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=192, channels_slope=76.82, channels_mult=2.19, depth=17, groups=56, use_se=True, model_name="regnety080", **kwargs) def regnety120(**kwargs): """ RegNetY-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112, use_se=True, model_name="regnety120", **kwargs) def regnety160(**kwargs): """ RegNetY-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_regnet(channels_init=200, channels_slope=106.23, channels_mult=2.48, depth=18, groups=112, use_se=True, model_name="regnety160", **kwargs) def regnety320(**kwargs): """ RegNetY-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_regnet(channels_init=232, channels_slope=115.89, channels_mult=2.53, depth=20, groups=232, use_se=True, model_name="regnety320", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ regnetx002, regnetx004, regnetx006, regnetx008, regnetx016, regnetx032, regnetx040, regnetx064, regnetx080, regnetx120, regnetx160, regnetx320, regnety002, regnety004, regnety006, regnety008, regnety016, regnety032, regnety040, regnety064, regnety080, regnety120, regnety160, regnety320, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != regnetx002 or weight_count == 2684792) assert (model != regnetx004 or weight_count == 5157512) assert (model != regnetx006 or weight_count == 6196040) assert (model != regnetx008 or weight_count == 7259656) assert (model != regnetx016 or weight_count == 9190136) assert (model != regnetx032 or weight_count == 15296552) assert (model != regnetx040 or weight_count == 22118248) assert (model != regnetx064 or weight_count == 26209256) assert (model != regnetx080 or weight_count == 39572648) assert (model != regnetx120 or weight_count == 46106056) assert (model != regnetx160 or weight_count == 54278536) assert (model != regnetx320 or weight_count == 107811560) assert (model != regnety002 or weight_count == 3162996) assert (model != regnety004 or weight_count == 4344144) assert (model != regnety006 or weight_count == 6055160) assert (model != regnety008 or weight_count == 6263168) assert (model != regnety016 or weight_count == 11202430) assert (model != regnety032 or weight_count == 19436338) assert (model != regnety040 or weight_count == 20646656) assert (model != regnety064 or weight_count == 30583252) assert (model != regnety080 or weight_count == 39180068) assert (model != regnety120 or weight_count == 51822544) assert (model != regnety160 or weight_count == 83590140) assert (model != regnety320 or weight_count == 145046770) batch = 14 size = 224 x = torch.randn(batch, 3, size, size) y = net(x) y.sum().backward() assert (tuple(y.size()) == (batch, 1000)) if __name__ == "__main__": _test()
24,321
32.874652
118
py
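Since the width-quantization arithmetic in get_regnet is easy to misread, here is a standalone sketch of the same computation, run with the regnetx002 parameters (w_0=24, w_a=36.44, w_m=2.49, depth=13, group width 8). The printed values are illustrative output of this sketch, not figures asserted by the repo.

# Standalone sketch of RegNet's per-stage width/depth generation.
import numpy as np

channels_init, channels_slope, channels_mult, depth, groups = 24, 36.44, 2.49, 13, 8
divisor = 8
# Continuous widths u_j = w_0 + w_a * j, then quantize to powers of w_m:
channels_cont = np.arange(depth) * channels_slope + channels_init
channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
channels = channels_init * np.power(channels_mult, channels_exps)
channels = (np.round(channels / divisor) * divisor).astype(int)
# Blocks sharing a width form a stage; counts give the per-stage depths:
channels_per_stage, layers = np.unique(channels, return_counts=True)
# Make each stage width a multiple of its group count:
groups_per_stage = [min(groups, int(c)) for c in channels_per_stage]
channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
print(channels_per_stage, list(layers))  # per-stage widths and depths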
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/icnet.py
""" ICNet for image segmentation, implemented in PyTorch. Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,' https://arxiv.org/abs/1704.08545. """ __all__ = ['ICNet', 'icnet_resnetd50b_cityscapes'] import os import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential from .pspnet import PyramidPooling from .resnetd import resnetd50b class ICInitBlock(nn.Module): """ ICNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ICInitBlock, self).__init__() mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=2) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class PSPBlock(nn.Module): """ ICNet specific PSPNet reduced head block. Parameters: ---------- in_channels : int Number of input channels. upscale_out_size : tuple of 2 int Spatial size of the input tensor for the bilinear upsampling operation. bottleneck_factor : int Bottleneck factor. """ def __init__(self, in_channels, upscale_out_size, bottleneck_factor): super(PSPBlock, self).__init__() assert (in_channels % bottleneck_factor == 0) mid_channels = in_channels // bottleneck_factor self.pool = PyramidPooling( in_channels=in_channels, upscale_out_size=upscale_out_size) self.conv = conv3x3_block( in_channels=4096, out_channels=mid_channels) self.dropout = nn.Dropout(p=0.1, inplace=False) def forward(self, x): x = self.pool(x) x = self.conv(x) x = self.dropout(x) return x class CFFBlock(nn.Module): """ Cascade Feature Fusion block. Parameters: ---------- in_channels_low : int Number of input channels (low input). in_channels_high : int Number of input channels (low high). out_channels : int Number of output channels. num_classes : int Number of classification classes. """ def __init__(self, in_channels_low, in_channels_high, out_channels, num_classes): super(CFFBlock, self).__init__() self.up = InterpolationBlock(scale_factor=2) self.conv_low = conv3x3_block( in_channels=in_channels_low, out_channels=out_channels, padding=2, dilation=2, activation=None) self.conv_hign = conv1x1_block( in_channels=in_channels_high, out_channels=out_channels, activation=None) self.activ = nn.ReLU(inplace=True) self.conv_cls = conv1x1( in_channels=out_channels, out_channels=num_classes) def forward(self, xl, xh): xl = self.up(xl) xl = self.conv_low(xl) xh = self.conv_hign(xh) x = xl + xh x = self.activ(x) x_cls = self.conv_cls(xl) return x, x_cls class ICHeadBlock(nn.Module): """ ICNet head block. Parameters: ---------- num_classes : int Number of classification classes. 
""" def __init__(self, num_classes): super(ICHeadBlock, self).__init__() self.cff_12 = CFFBlock( in_channels_low=128, in_channels_high=64, out_channels=128, num_classes=num_classes) self.cff_24 = CFFBlock( in_channels_low=256, in_channels_high=256, out_channels=128, num_classes=num_classes) self.up_x2 = InterpolationBlock(scale_factor=2) self.up_x8 = InterpolationBlock(scale_factor=4) self.conv_cls = conv1x1( in_channels=128, out_channels=num_classes) def forward(self, x1, x2, x4): outputs = [] x_cff_24, x_24_cls = self.cff_24(x4, x2) outputs.append(x_24_cls) x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1) outputs.append(x_12_cls) up_x2 = self.up_x2(x_cff_12) up_x2 = self.conv_cls(up_x2) outputs.append(up_x2) up_x8 = self.up_x8(up_x2) outputs.append(up_x8) # 1 -> 1/4 -> 1/8 -> 1/16 outputs.reverse() return tuple(outputs) class ICNet(nn.Module): """ ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,' https://arxiv.org/abs/1704.08545. Parameters: ---------- backbones : tuple of nn.Sequential Feature extractors. backbones_out_channels : tuple of int Number of output channels form each feature extractor. num_classes : tuple of int Number of output channels for each branch. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. num_classes : int, default 21 Number of segmentation classes. """ def __init__(self, backbones, backbones_out_channels, channels, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21): super(ICNet, self).__init__() assert (in_channels > 0) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None psp_head_out_channels = 512 self.branch1 = ICInitBlock( in_channels=in_channels, out_channels=channels[0]) self.branch2 = MultiOutputSequential() self.branch2.add_module("down1", InterpolationBlock(scale_factor=2, up=False)) backbones[0].do_output = True self.branch2.add_module("backbones1", backbones[0]) self.branch2.add_module("down2", InterpolationBlock(scale_factor=2, up=False)) self.branch2.add_module("backbones2", backbones[1]) self.branch2.add_module("psp", PSPBlock( in_channels=backbones_out_channels[1], upscale_out_size=psp_pool_out_size, bottleneck_factor=4)) self.branch2.add_module("final_block", conv1x1_block( in_channels=psp_head_out_channels, out_channels=channels[2])) self.conv_y2 = conv1x1_block( in_channels=backbones_out_channels[0], out_channels=channels[1]) self.final_block = ICHeadBlock(num_classes=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): y1 = self.branch1(x) y3, y2 = self.branch2(x) y2 = self.conv_y2(y2) x = self.final_block(y1, y2, y3) if self.aux: return x else: return x[0] def get_icnet(backbones, backbones_out_channels, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ICNet model with specific parameters. Parameters: ---------- backbones : tuple of nn.Sequential Feature extractors. 
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    num_classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    channels = (64, 256, 256)

    backbones[0].multi_output = False
    backbones[1].multi_output = False

    net = ICNet(
        backbones=backbones,
        backbones_out_channels=backbones_out_channels,
        channels=channels,
        num_classes=num_classes,
        aux=aux,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net


def icnet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
    """
    ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
    High-Resolution Images,' https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    num_classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    backbone1 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    for i in range(len(backbone1) - 3):
        del backbone1[-1]
    backbone2 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    del backbone2[-1]
    for i in range(3):
        del backbone2[0]
    backbones = (backbone1, backbone2)
    backbones_out_channels = (512, 2048)
    return get_icnet(backbones=backbones, backbones_out_channels=backbones_out_channels, num_classes=num_classes,
                     aux=aux, model_name="icnet_resnetd50b_cityscapes", **kwargs)


def _calc_width(net):
    import numpy as np
    net_params = filter(lambda p: p.requires_grad, net.parameters())
    weight_count = 0
    for param in net_params:
        weight_count += np.prod(param.size())
    return weight_count


def _test():
    import torch

    in_size = (480, 480)
    aux = False
    fixed_size = False
    pretrained = False

    models = [
        (icnet_resnetd50b_cityscapes, 19),
    ]

    for model, num_classes in models:

        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux)

        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != icnet_resnetd50b_cityscapes or weight_count == 47489184)

        x = torch.randn(1, 3, in_size[0], in_size[1])
        ys = net(x)
        y = ys[0] if aux else ys
        y.sum().backward()
        assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and
                (y.size(3) == x.size(3)))


if __name__ == "__main__":
    _test()
12,295
29.894472
115
py
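With aux=True, ICNet returns four logit maps (full resolution, then 1/4, 1/8, and 1/16). Below is a hedged sketch of how these auxiliary outputs could be combined into a training loss; the 1.0/0.4/0.4/0.4 weights and the ignore_index value are assumptions in the spirit of the original paper, not values taken from this repo.

# Hypothetical multi-scale loss for ICNet's (full, 1/4, 1/8, 1/16) outputs.
import torch
import torch.nn.functional as F

def icnet_loss(outputs, target, weights=(1.0, 0.4, 0.4, 0.4)):
    # outputs: tuple of logits as returned by ICNet with aux=True
    # target: (N, H, W) long tensor of class indices
    loss = 0.0
    for out, w in zip(outputs, weights):
        tgt = target
        if out.shape[2:] != target.shape[1:]:
            # downsample the label map to match the auxiliary output
            tgt = F.interpolate(target.unsqueeze(1).float(), size=out.shape[2:],
                                mode="nearest").squeeze(1).long()
        loss = loss + w * F.cross_entropy(out, tgt, ignore_index=-1)
    return loss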
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mobilenetb.py
""" MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in Gluon. Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. """ __all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4'] from .mobilenet import get_mobilenet def mobilenetb_w1(**kwargs): """ 1.0 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=1.0, dws_simplified=True, model_name="mobilenetb_w1", **kwargs) def mobilenetb_w3d4(**kwargs): """ 0.75 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.75, dws_simplified=True, model_name="mobilenetb_w3d4", **kwargs) def mobilenetb_wd2(**kwargs): """ 0.5 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.5, dws_simplified=True, model_name="mobilenetb_wd2", **kwargs) def mobilenetb_wd4(**kwargs): """ 0.25 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(width_scale=0.25, dws_simplified=True, model_name="mobilenetb_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mobilenetb_w1, mobilenetb_w3d4, mobilenetb_wd2, mobilenetb_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetb_w1 or weight_count == 4222056) assert (model != mobilenetb_w3d4 or weight_count == 2578120) assert (model != mobilenetb_wd2 or weight_count == 1326632) assert (model != mobilenetb_wd4 or weight_count == 467592) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
3,794
32.289474
113
py
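The four wrappers above differ only in width_scale, which shrinks every layer's channel count. A small sketch comparing trainable-parameter counts across the MobileNet(B) width scales, assuming the pytorchcv package is installed; the counts printed should match the asserts in the file's _test().

# Parameter-count comparison sketch, assuming `pip install pytorchcv`.
from pytorchcv.model_provider import get_model

for name in ("mobilenetb_w1", "mobilenetb_w3d4", "mobilenetb_wd2", "mobilenetb_wd4"):
    net = get_model(name, pretrained=False)
    n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print(name, n_params)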
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/shakedropresnet_cifar.py
""" ShakeDrop-ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375. """ __all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResBlock, ResBottleneck class ShakeDrop(torch.autograd.Function): """ ShakeDrop function. """ @staticmethod def forward(ctx, x, b, alpha): y = (b + alpha - b * alpha) * x ctx.save_for_backward(b) return y @staticmethod def backward(ctx, dy): beta = torch.rand(dy.size(0), dtype=dy.dtype, device=dy.device).view(-1, 1, 1, 1) b, = ctx.saved_tensors return (b + beta - b * beta) * dy, None, None class ShakeDropResUnit(nn.Module): """ ShakeDrop-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. life_prob : float Residual branch life probability. """ def __init__(self, in_channels, out_channels, stride, bottleneck, life_prob): super(ShakeDropResUnit, self).__init__() self.life_prob = life_prob self.resize_identity = (in_channels != out_channels) or (stride != 1) body_class = ResBottleneck if bottleneck else ResBlock self.body = body_class( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) self.shake_drop = ShakeDrop.apply def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if self.training: b = torch.bernoulli(torch.full((1,), self.life_prob, dtype=x.dtype, device=x.device)) alpha = torch.empty(x.size(0), dtype=x.dtype, device=x.device).view(-1, 1, 1, 1).uniform_(-1.0, 1.0) x = self.shake_drop(x, b, alpha) else: x = self.life_prob * x x = x + identity x = self.activ(x) return x class CIFARShakeDropResNet(nn.Module): """ ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. life_probs : list of float Residual branch life probability for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, life_probs, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARShakeDropResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels k = 0 for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ShakeDropResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, life_prob=life_probs[k])) in_channels = out_channels k += 1 self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_shakedropresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ShakeDrop-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 channels_per_layers = [16, 32, 64] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] total_layers = sum(layers) final_death_prob = 0.5 life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)] net = CIFARShakeDropResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, life_probs=life_probs, num_classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shakedropresnet20_cifar10(classes=10, **kwargs): """ ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_cifar10", **kwargs) def shakedropresnet20_cifar100(classes=100, **kwargs): """ ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_cifar100", **kwargs) def shakedropresnet20_svhn(classes=10, **kwargs): """ ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (shakedropresnet20_cifar10, 10), (shakedropresnet20_cifar100, 100), (shakedropresnet20_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shakedropresnet20_cifar10 or weight_count == 272474) assert (model != shakedropresnet20_cifar100 or weight_count == 278324) assert (model != shakedropresnet20_svhn or weight_count == 272474) x = torch.randn(14, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, num_classes)) if __name__ == "__main__": _test()
10,750
31.677812
119
py
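A quick numeric sanity check of the ShakeDrop gate above: for b ~ Bernoulli(p) and alpha ~ U(-1, 1) drawn independently, E[b + alpha - b*alpha] = p + E[alpha]*(1 - p) = p, which is why ShakeDropResUnit simply multiplies the residual branch by life_prob at inference time. This is a standalone sketch, not part of the module.

# Empirical check that the ShakeDrop gate has mean life_prob.
import torch

p = 0.7
b = torch.bernoulli(torch.full((10000,), p))        # b ~ Bernoulli(p)
alpha = torch.empty(10000).uniform_(-1.0, 1.0)      # alpha ~ U(-1, 1)
gate = b + alpha - b * alpha
print(gate.mean().item())  # close to p = 0.7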
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/inceptionresnetv1.py
""" InceptionResNetV1 for ImageNet-1K, implemented in PyTorch. Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. """ __all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit', 'ReductionAUnit', 'ReductionBUnit'] import os import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch class InceptionAUnit(nn.Module): """ InceptionResNetV1 type Inception-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int List for numbers of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, bn_eps): super(InceptionAUnit, self).__init__() self.scale = 0.17 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=out_channels_list[0], bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[1:3], kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[3:6], kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps)) conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5] self.conv = conv1x1( in_channels=conv_in_channels, out_channels=in_channels, bias=True) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.branches(x) x = self.conv(x) x = self.scale * x + identity x = self.activ(x) return x class InceptionBUnit(nn.Module): """ InceptionResNetV1 type Inception-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int List for numbers of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, bn_eps): super(InceptionBUnit, self).__init__() self.scale = 0.10 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=out_channels_list[0], bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[1:4], kernel_size_list=(1, (1, 7), (7, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 3), (3, 0)), bn_eps=bn_eps)) conv_in_channels = out_channels_list[0] + out_channels_list[3] self.conv = conv1x1( in_channels=conv_in_channels, out_channels=in_channels, bias=True) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.branches(x) x = self.conv(x) x = self.scale * x + identity x = self.activ(x) return x class InceptionCUnit(nn.Module): """ InceptionResNetV1 type Inception-C unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int List for numbers of output channels. bn_eps : float Small float added to variance in Batch norm. scale : float, default 0.2 Scale value for residual branch. activate : bool, default True Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels_list, bn_eps, scale=0.2, activate=True): super(InceptionCUnit, self).__init__() self.activate = activate self.scale = scale self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=out_channels_list[0], bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[1:4], kernel_size_list=(1, (1, 3), (3, 1)), strides_list=(1, 1, 1), padding_list=(0, (0, 1), (1, 0)), bn_eps=bn_eps)) conv_in_channels = out_channels_list[0] + out_channels_list[3] self.conv = conv1x1( in_channels=conv_in_channels, out_channels=in_channels, bias=True) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.branches(x) x = self.conv(x) x = self.scale * x + identity if self.activate: x = self.activ(x) return x class ReductionAUnit(nn.Module): """ InceptionResNetV1 type Reduction-A unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int List for numbers of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, bn_eps): super(ReductionAUnit, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[0:1], kernel_size_list=(3,), strides_list=(2,), padding_list=(0,), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[1:4], kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), bn_eps=bn_eps)) self.branches.add_module("branch3", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class ReductionBUnit(nn.Module): """ InceptionResNetV1 type Reduction-B unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int List for numbers of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels_list, bn_eps): super(ReductionBUnit, self).__init__() self.branches = Concurrent() self.branches.add_module("branch1", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[0:2], kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[2:4], kernel_size_list=(1, 3), strides_list=(1, 2), padding_list=(0, 0), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=out_channels_list[4:7], kernel_size_list=(1, 3, 3), strides_list=(1, 1, 2), padding_list=(0, 1, 0), bn_eps=bn_eps)) self.branches.add_module("branch4", MaxPoolBranch()) def forward(self, x): x = self.branches(x) return x class InceptInitBlock(nn.Module): """ InceptionResNetV1 specific initial block. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, bn_eps): super(InceptInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0, bn_eps=bn_eps) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, stride=1, padding=0, bn_eps=bn_eps) self.conv3 = conv3x3_block( in_channels=32, out_channels=64, stride=1, padding=1, bn_eps=bn_eps) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) self.conv4 = conv1x1_block( in_channels=64, out_channels=80, stride=1, padding=0, bn_eps=bn_eps) self.conv5 = conv3x3_block( in_channels=80, out_channels=192, stride=1, padding=0, bn_eps=bn_eps) self.conv6 = conv3x3_block( in_channels=192, out_channels=256, stride=2, padding=0, bn_eps=bn_eps) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool(x) x = self.conv4(x) x = self.conv5(x) x = self.conv6(x) return x class InceptHead(nn.Module): """ InceptionResNetV1 specific classification block. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. dropout_rate : float Fraction of the input units to drop. Must be a number between 0 and 1. num_classes : int Number of classification classes. """ def __init__(self, in_channels, bn_eps, dropout_rate, num_classes): super(InceptHead, self).__init__() self.use_dropout = (dropout_rate != 0.0) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) self.fc1 = nn.Linear( in_features=in_channels, out_features=512, bias=False) self.bn = nn.BatchNorm1d( num_features=512, eps=bn_eps) self.fc2 = nn.Linear( in_features=512, out_features=num_classes) def forward(self, x): if self.use_dropout: x = self.dropout(x) x = self.fc1(x) x = self.bn(x) x = self.fc2(x) return x class InceptionResNetV1(nn.Module): """ InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, dropout_prob=0.6, bn_eps=1e-5, in_channels=3, in_size=(299, 299), num_classes=1000): super(InceptionResNetV1, self).__init__() self.in_size = in_size self.num_classes = num_classes layers = [5, 11, 7] in_channels_list = [256, 896, 1792] normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]] reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]] normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = nn.Sequential() self.features.add_module("init_block", InceptInitBlock( in_channels=in_channels, bn_eps=bn_eps)) in_channels = in_channels_list[0] for i, layers_per_stage in enumerate(layers): stage = nn.Sequential() for j in range(layers_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] out_channels_list_per_stage = reduction_out_channels_list[i - 1] else: unit = normal_units[i] out_channels_list_per_stage = normal_out_channels_list[i] if (i == len(layers) - 1) and (j == layers_per_stage - 1): unit_kwargs = {"scale": 1.0, "activate": False} else: unit_kwargs = {} stage.add_module("unit{}".format(j + 1), unit( in_channels=in_channels, out_channels_list=out_channels_list_per_stage, bn_eps=bn_eps, **unit_kwargs)) if (j == 0) and (i != 0): in_channels = in_channels_list[i] self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = InceptHead( in_channels=in_channels, bn_eps=bn_eps, dropout_rate=dropout_prob, num_classes=num_classes) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_inceptionresnetv1(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create InceptionResNetV1 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = InceptionResNetV1(**kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def inceptionresnetv1(**kwargs): """ InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_inceptionresnetv1(model_name="inceptionresnetv1", bn_eps=1e-3, **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ inceptionresnetv1, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != inceptionresnetv1 or weight_count == 23995624) x = torch.randn(1, 3, 299, 299) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
16,987
30.285451
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/scnet.py
""" SCNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. """ __all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, InterpolationBlock from .resnet import ResInitBlock from .senet import SEInitBlock from .resnesta import ResNeStADownBlock class ScDownBlock(nn.Module): """ SCNet specific convolutional downscale block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. pool_size: int or list/tuple of 2 ints, default 2 Size of the average pooling windows. """ def __init__(self, in_channels, out_channels, pool_size=2): super(ScDownBlock, self).__init__() self.pool = nn.AvgPool2d( kernel_size=pool_size, stride=pool_size) self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ScConv(nn.Module): """ Self-calibrated convolutional block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. scale_factor : int Scale factor. """ def __init__(self, in_channels, out_channels, stride, scale_factor): super(ScConv, self).__init__() self.down = ScDownBlock( in_channels=in_channels, out_channels=out_channels, pool_size=scale_factor) self.up = InterpolationBlock( scale_factor=scale_factor, mode="nearest", align_corners=None) self.sigmoid = nn.Sigmoid() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=in_channels, activation=None) self.conv2 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride) def forward(self, x): w = self.sigmoid(x + self.up(self.down(x), size=x.shape[2:])) x = self.conv1(x) * w x = self.conv2(x) return x class ScBottleneck(nn.Module): """ SCNet specific bottleneck block for residual path in SCNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck_factor : int, default 4 Bottleneck factor. scale_factor : int, default 4 Scale factor. avg_downsample : bool, default False Whether to use average downsampling. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck_factor=4, scale_factor=4, avg_downsample=False): super(ScBottleneck, self).__init__() self.avg_resize = (stride > 1) and avg_downsample mid_channels = out_channels // bottleneck_factor // 2 self.conv1a = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2a = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if self.avg_resize else stride)) self.conv1b = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2b = ScConv( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if self.avg_resize else stride), scale_factor=scale_factor) if self.avg_resize: self.pool = nn.AvgPool2d( kernel_size=3, stride=stride, padding=1) self.conv3 = conv1x1_block( in_channels=(2 * mid_channels), out_channels=out_channels, activation=None) def forward(self, x): y = self.conv1a(x) y = self.conv2a(y) z = self.conv1b(x) z = self.conv2b(z) if self.avg_resize: y = self.pool(y) z = self.pool(z) x = torch.cat((y, z), dim=1) x = self.conv3(x) return x class ScUnit(nn.Module): """ SCNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. avg_downsample : bool, default False Whether to use average downsampling. """ def __init__(self, in_channels, out_channels, stride, avg_downsample=False): super(ScUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = ScBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, avg_downsample=avg_downsample) if self.resize_identity: if avg_downsample: self.identity_block = ResNeStADownBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.identity_block = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_block(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class SCNet(nn.Module): """ SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. se_init_block : bool, default False SENet-like initial block. avg_downsample : bool, default False Whether to use average downsampling. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, se_init_block=False, avg_downsample=False, in_channels=3, in_size=(224, 224), num_classes=1000): super(SCNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() init_block_class = SEInitBlock if se_init_block else ResInitBlock self.features.add_module("init_block", init_block_class( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ScUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, avg_downsample=avg_downsample)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_scnet(blocks, width_scale=1.0, se_init_block=False, avg_downsample=False, init_block_channels_scale=1, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SCNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. width_scale : float, default 1.0 Scale factor for width of layers. se_init_block : bool, default False SENet-like initial block. avg_downsample : bool, default False Whether to use average downsampling. init_block_channels_scale : int, default 1 Scale factor for number of output channels in the initial unit. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 14: layers = [1, 1, 1, 1] elif blocks == 26: layers = [2, 2, 2, 2] elif blocks == 38: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks)) assert (sum(layers) * 3 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] init_block_channels *= init_block_channels_scale bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = SCNet( channels=channels, init_block_channels=init_block_channels, se_init_block=se_init_block, avg_downsample=avg_downsample, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def scnet50(**kwargs): """ SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_scnet(blocks=50, model_name="scnet50", **kwargs) def scnet101(**kwargs): """ SCNet-101 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_scnet(blocks=101, model_name="scnet101", **kwargs) def scneta50(**kwargs): """ SCNet(A)-50 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_scnet(blocks=50, se_init_block=True, avg_downsample=True, model_name="scneta50", **kwargs) def scneta101(**kwargs): """ SCNet(A)-101 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_scnet(blocks=101, se_init_block=True, avg_downsample=True, init_block_channels_scale=2, model_name="scneta101", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ scnet50, scnet101, scneta50, scneta101, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != scnet50 or weight_count == 25564584) assert (model != scnet101 or weight_count == 44565416) assert (model != scneta50 or weight_count == 25583816) assert (model != scneta101 or weight_count == 44689192) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
14,943
29.876033
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/igcv3.py
""" IGCV3 for ImageNet-1K, implemented in PyTorch. Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. """ __all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle class InvResUnit(nn.Module): """ So-called 'Inverted Residual Unit' layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. expansion : bool Whether do expansion of channels. """ def __init__(self, in_channels, out_channels, stride, expansion): super(InvResUnit, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) mid_channels = in_channels * 6 if expansion else in_channels groups = 2 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, groups=groups, activation=None) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation="relu6") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, groups=groups, activation=None) def forward(self, x): if self.residual: identity = x x = self.conv1(x) x = self.c_shuffle(x) x = self.conv2(x) x = self.conv3(x) if self.residual: x = x + identity return x class IGCV3(nn.Module): """ IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(IGCV3, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation="relu6")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) stage.add_module("unit{}".format(j + 1), InvResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, activation="relu6")) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_igcv3(width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IGCV3-D model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 32 final_block_channels = 1280 layers = [1, 4, 6, 8, 6, 6, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: def make_even(x): return x if (x % 2 == 0) else x + 1 channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels] init_block_channels = make_even(int(init_block_channels * width_scale)) if width_scale > 1.0: final_block_channels = make_even(int(final_block_channels * width_scale)) net = IGCV3( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def igcv3_w1(**kwargs): """ IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs) def igcv3_w3d4(**kwargs): """ IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs) def igcv3_wd2(**kwargs): """ IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=0.5, model_name="igcv3_wd2", **kwargs) def igcv3_wd4(**kwargs): """ IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_igcv3(width_scale=0.25, model_name="igcv3_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ igcv3_w1, igcv3_w3d4, igcv3_wd2, igcv3_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != igcv3_w1 or weight_count == 3491688) assert (model != igcv3_w3d4 or weight_count == 2638084) assert (model != igcv3_wd2 or weight_count == 1985528) assert (model != igcv3_wd4 or weight_count == 1534020) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,829
30.709677
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/seresnet_cifar.py
""" SE-ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn', 'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn', 'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn', 'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn', 'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn', 'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn', 'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn', 'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .seresnet import SEResUnit class CIFARSEResNet(nn.Module): """ SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification num_classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARSEResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_seresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SE-ResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification num_classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARSEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnet20_cifar10(num_classes=10, **kwargs): """ SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar10", **kwargs) def seresnet20_cifar100(num_classes=100, **kwargs): """ SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar100", **kwargs) def seresnet20_svhn(num_classes=10, **kwargs): """ SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="seresnet20_svhn", **kwargs) def seresnet56_cifar10(num_classes=10, **kwargs): """ SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar10", **kwargs) def seresnet56_cifar100(num_classes=100, **kwargs): """ SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar100", **kwargs) def seresnet56_svhn(num_classes=10, **kwargs): """ SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs) def seresnet110_cifar10(num_classes=10, **kwargs): """ SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar10", **kwargs) def seresnet110_cifar100(num_classes=100, **kwargs): """ SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar100", **kwargs) def seresnet110_svhn(num_classes=10, **kwargs): """ SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="seresnet110_svhn", **kwargs) def seresnet164bn_cifar10(num_classes=10, **kwargs): """ SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar10", **kwargs) def seresnet164bn_cifar100(num_classes=100, **kwargs): """ SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar100", **kwargs) def seresnet164bn_svhn(num_classes=10, **kwargs): """ SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="seresnet164bn_svhn", **kwargs) def seresnet272bn_cifar10(num_classes=10, **kwargs): """ SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar10", **kwargs) def seresnet272bn_cifar100(num_classes=100, **kwargs): """ SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar100", **kwargs) def seresnet272bn_svhn(num_classes=10, **kwargs): """ SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="seresnet272bn_svhn", **kwargs) def seresnet542bn_cifar10(num_classes=10, **kwargs): """ SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar10", **kwargs) def seresnet542bn_cifar100(num_classes=100, **kwargs): """ SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar100", **kwargs) def seresnet542bn_svhn(num_classes=10, **kwargs): """ SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="seresnet542bn_svhn", **kwargs) def seresnet1001_cifar10(num_classes=10, **kwargs): """ SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar10", **kwargs) def seresnet1001_cifar100(num_classes=100, **kwargs): """ SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar100", **kwargs) def seresnet1001_svhn(num_classes=10, **kwargs): """ SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="seresnet1001_svhn", **kwargs) def seresnet1202_cifar10(num_classes=10, **kwargs): """ SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar10", **kwargs) def seresnet1202_cifar100(num_classes=100, **kwargs): """ SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar100", **kwargs) def seresnet1202_svhn(num_classes=10, **kwargs): """ SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="seresnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (seresnet20_cifar10, 10), (seresnet20_cifar100, 100), (seresnet20_svhn, 10), (seresnet56_cifar10, 10), (seresnet56_cifar100, 100), (seresnet56_svhn, 10), (seresnet110_cifar10, 10), (seresnet110_cifar100, 100), (seresnet110_svhn, 10), (seresnet164bn_cifar10, 10), (seresnet164bn_cifar100, 100), (seresnet164bn_svhn, 10), (seresnet272bn_cifar10, 10), (seresnet272bn_cifar100, 100), (seresnet272bn_svhn, 10), (seresnet542bn_cifar10, 10), (seresnet542bn_cifar100, 100), (seresnet542bn_svhn, 10), (seresnet1001_cifar10, 10), (seresnet1001_cifar100, 100), (seresnet1001_svhn, 10), (seresnet1202_cifar10, 10), (seresnet1202_cifar100, 100), (seresnet1202_svhn, 10), ] for model, num_num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet20_cifar10 or weight_count == 274847) assert (model != seresnet20_cifar100 or weight_count == 280697) assert (model != seresnet20_svhn or weight_count == 274847) assert (model != seresnet56_cifar10 or weight_count == 862889) assert (model != seresnet56_cifar100 or weight_count == 868739) assert (model != seresnet56_svhn or weight_count == 862889) assert (model != seresnet110_cifar10 or weight_count == 1744952) assert (model != seresnet110_cifar100 or weight_count == 1750802) assert (model != seresnet110_svhn or weight_count == 1744952) assert (model != seresnet164bn_cifar10 or weight_count == 1906258) assert (model != seresnet164bn_cifar100 or weight_count == 1929388) assert (model != seresnet164bn_svhn or weight_count == 1906258) assert (model != seresnet272bn_cifar10 or weight_count == 3153826) assert (model != seresnet272bn_cifar100 or weight_count == 3176956) assert (model != seresnet272bn_svhn or weight_count == 3153826) assert (model != seresnet542bn_cifar10 or weight_count == 6272746) assert (model != seresnet542bn_cifar100 or weight_count == 6295876) assert (model != seresnet542bn_svhn or weight_count == 6272746) assert (model != seresnet1001_cifar10 or weight_count == 11574910) assert (model != seresnet1001_cifar100 or weight_count == 11598040) assert (model != seresnet1001_svhn or weight_count == 11574910) assert (model != seresnet1202_cifar10 or weight_count == 19582226) assert (model != seresnet1202_cifar100 or weight_count == 19588076) assert (model != seresnet1202_svhn or weight_count == 19582226) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_num_classes)) if __name__ == "__main__": _test()
24,036
36.324534
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnetd.py
""" ResNet(D) with dilation for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b'] import os import torch.nn as nn import torch.nn.init as init from .common import MultiOutputSequential from .resnet import ResUnit, ResInitBlock from .senet import SEInitBlock class ResNetD(nn.Module): """ ResNet(D) with dilation model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. ordinary_init : bool, default False Whether to use original initial block or SENet one. bends : tuple of int, default None Numbers of bends for multiple output. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, ordinary_init=False, bends=None, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNetD, self).__init__() self.in_size = in_size self.num_classes = num_classes self.multi_output = (bends is not None) self.features = MultiOutputSequential() if ordinary_init: self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) else: init_block_channels = 2 * init_block_channels self.features.add_module("init_block", SEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1 dilation = (2 ** max(0, i - 1 - int(j == 0))) stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=dilation, dilation=dilation, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels if self.multi_output and ((i + 1) in bends): stage.do_output = True self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): outs = self.features(x) x = outs[0] x = x.view(x.size(0), -1) x = self.output(x) if self.multi_output: return [x] + outs[1:] else: return x def get_resnetd(blocks, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet(D) with dilation model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14: layers = [2, 2, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNetD( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnetd50b(**kwargs): """ ResNet(D)-50 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnetd(blocks=50, conv1_stride=False, model_name="resnetd50b", **kwargs) def resnetd101b(**kwargs): """ ResNet(D)-101 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnetd(blocks=101, conv1_stride=False, model_name="resnetd101b", **kwargs) def resnetd152b(**kwargs): """ ResNet(D)-152 with dilation model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnetd(blocks=152, conv1_stride=False, model_name="resnetd152b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch ordinary_init = False bends = None pretrained = False models = [ resnetd50b, resnetd101b, resnetd152b, ] for model in models: net = model( pretrained=pretrained, ordinary_init=ordinary_init, bends=bends) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if ordinary_init: assert (model != resnetd50b or weight_count == 25557032) assert (model != resnetd101b or weight_count == 44549160) assert (model != resnetd152b or weight_count == 60192808) else: assert (model != resnetd50b or weight_count == 25680808) assert (model != resnetd101b or weight_count == 44672936) assert (model != resnetd152b or weight_count == 60316584) x = torch.randn(1, 3, 224, 224) y = net(x) if bends is not None: y = y[0] y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,674
32.362069
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/quartznet.py
""" QuartzNet for ASR, implemented in PyTorch. Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. """ __all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de', 'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru', 'quartznet15x5_ru34'] from .jasper import get_jasper def quartznet5x5_en_ls(num_classes=29, **kwargs): """ QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(num_classes=num_classes, version=("quartznet", "5x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet5x5_en_ls", **kwargs) def quartznet15x5_en(num_classes=29, **kwargs): """ QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en", **kwargs) def quartznet15x5_en_nr(num_classes=29, **kwargs): """ QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en_nr", **kwargs) def quartznet15x5_fr(num_classes=43, **kwargs): """ QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 43 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï', 'ü', 'ÿ'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_fr", **kwargs) def quartznet15x5_de(num_classes=32, **kwargs): """ QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 32 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_de", **kwargs) def quartznet15x5_it(num_classes=39, **kwargs): """ QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_it", **kwargs) def quartznet15x5_es(num_classes=36, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 36 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_es", **kwargs) def quartznet15x5_ca(num_classes=39, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ca", **kwargs) def quartznet15x5_pl(num_classes=34, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń', 'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_pl", **kwargs) def quartznet15x5_ru(num_classes=35, **kwargs): """ QuartzNet 15x5 model for Russian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 35 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru", **kwargs) def quartznet15x5_ru34(num_classes=34, **kwargs): """ QuartzNet 15x5 model for Russian language (32 graphemes) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- num_classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(num_classes=num_classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru34", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import numpy as np import torch pretrained = False from_audio = False audio_features = 64 use_cuda = True models = [ quartznet5x5_en_ls, quartznet15x5_en, quartznet15x5_en_nr, quartznet15x5_fr, quartznet15x5_de, quartznet15x5_it, quartznet15x5_es, quartznet15x5_ca, quartznet15x5_pl, quartznet15x5_ru, quartznet15x5_ru34, ] for model in models: net = model( in_channels=audio_features, from_audio=from_audio, pretrained=pretrained) if use_cuda: net = net.cuda() # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != quartznet5x5_en_ls or weight_count == 6713181) assert (model != quartznet15x5_en or weight_count == 18924381) assert (model != quartznet15x5_en_nr or weight_count == 18924381) assert (model != quartznet15x5_fr or weight_count == 18938731) assert (model != quartznet15x5_de or weight_count == 18927456) assert (model != quartznet15x5_it or weight_count == 18934631) assert (model != quartznet15x5_es or weight_count == 18931556) assert (model != quartznet15x5_ca or weight_count == 18934631) assert (model != quartznet15x5_pl or weight_count == 18929506) assert (model != quartznet15x5_ru or weight_count == 18930531) assert (model != quartznet15x5_ru34 or weight_count == 18929506) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max) x = torch.randn(x_shape) x_len = torch.tensor(seq_len, dtype=torch.long, device=x.device) if use_cuda: x = x.cuda() x_len = x_len.cuda() y, y_len = net(x, x_len) # y.sum().backward() assert (tuple(y.size())[:2] == (batch, net.num_classes)) if from_audio: assert (y.size()[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.size()[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
13,675
42.141956
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/preresnet.py
""" PreResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4', 'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34', 'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152', 'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck', 'PreResUnit', 'PreResInitBlock', 'PreResActivation'] import os import torch.nn as nn import torch.nn.init as init from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1 class PreResBlock(nn.Module): """ Simple PreResNet block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(PreResBlock, self).__init__() self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn, return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn) def forward(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) return x, x_pre_activ class PreResBottleneck(nn.Module): """ PreResNet bottleneck block for residual path in PreResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, conv1_stride): super(PreResBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1), return_preact=True) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride)) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x, x_pre_activ = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x, x_pre_activ class PreResUnit(nn.Module): """ PreResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(PreResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = PreResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = PreResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias) def forward(self, x): identity = x x, x_pre_activ = self.body(x) if self.resize_identity: identity = self.identity_conv(x_pre_activ) x = x + identity return x class PreResInitBlock(nn.Module): """ PreResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(PreResInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, padding=3, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class PreResActivation(nn.Module): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PreResActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class PreResNet(nn.Module): """ PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(PreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 stage.add_module("unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_preresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PreResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] elif blocks == 269: layers = [3, 30, 48, 8] else: raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = PreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def preresnet10(**kwargs): """ PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=10, model_name="preresnet10", **kwargs) def preresnet12(**kwargs): """ PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=12, model_name="preresnet12", **kwargs) def preresnet14(**kwargs): """ PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=14, model_name="preresnet14", **kwargs) def preresnetbc14b(**kwargs): """ PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs) def preresnet16(**kwargs): """ PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=16, model_name="preresnet16", **kwargs) def preresnet18_wd4(**kwargs): """ PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs) def preresnet18_wd2(**kwargs): """ PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs) def preresnet18_w3d4(**kwargs): """ PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs) def preresnet18(**kwargs): """ PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=18, model_name="preresnet18", **kwargs) def preresnet26(**kwargs): """ PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs) def preresnetbc26b(**kwargs): """ PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs) def preresnet34(**kwargs): """ PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=34, model_name="preresnet34", **kwargs) def preresnetbc38b(**kwargs): """ PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs) def preresnet50(**kwargs): """ PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, model_name="preresnet50", **kwargs) def preresnet50b(**kwargs): """ PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs) def preresnet101(**kwargs): """ PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=101, model_name="preresnet101", **kwargs) def preresnet101b(**kwargs): """ PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs) def preresnet152(**kwargs): """ PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, model_name="preresnet152", **kwargs) def preresnet152b(**kwargs): """ PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs) def preresnet200(**kwargs): """ PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=200, model_name="preresnet200", **kwargs) def preresnet200b(**kwargs): """ PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs) def preresnet269b(**kwargs): """ PreResNet-269 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ preresnet10, preresnet12, preresnet14, preresnetbc14b, preresnet16, preresnet18_wd4, preresnet18_wd2, preresnet18_w3d4, preresnet18, preresnet26, preresnetbc26b, preresnet34, preresnetbc38b, preresnet50, preresnet50b, preresnet101, preresnet101b, preresnet152, preresnet152b, preresnet200, preresnet200b, preresnet269b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet10 or weight_count == 5417128) assert (model != preresnet12 or weight_count == 5491112) assert (model != preresnet14 or weight_count == 5786536) assert (model != preresnetbc14b or weight_count == 10057384) assert (model != preresnet16 or weight_count == 6967208) assert (model != preresnet18_wd4 or weight_count == 3935960) assert (model != preresnet18_wd2 or weight_count == 5802440) assert (model != preresnet18_w3d4 or weight_count == 8473784) assert (model != preresnet18 or weight_count == 11687848) assert (model != preresnet26 or weight_count == 17958568) assert (model != preresnetbc26b or weight_count == 15987624) assert (model != preresnet34 or weight_count == 21796008) assert (model != preresnetbc38b or weight_count == 21917864) assert (model != preresnet50 or weight_count == 25549480) assert (model != preresnet50b or weight_count == 25549480) assert (model != preresnet101 or weight_count == 44541608) assert (model != preresnet101b or weight_count == 44541608) assert (model != preresnet152 or weight_count == 60185256) assert (model != preresnet152b or weight_count == 60185256) assert (model != preresnet200 or weight_count == 64666280) assert (model != preresnet200b or weight_count == 64666280) assert (model != preresnet269b or weight_count == 102065832) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
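# --- Editor's usage sketch (not part of the original module) ---
# Feature extraction via the `features`/`output` split used by PreResNet:
# calling `net.features` directly yields the post-activation, average-pooled
# feature map just before the final linear classifier.
def _example_feature_extraction():
    import torch
    net = preresnet18(pretrained=False)
    net.eval()
    x = torch.randn(1, 3, 224, 224)
    feats = net.features(x)  # (1, 512, 1, 1) after the final 7x7 average pool
    logits = net.output(feats.view(feats.size(0), -1))
    assert (tuple(logits.size()) == (1, 1000))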
26,501
32.044888
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/lednet.py
""" LEDNet for image segmentation, implemented in PyTorch. Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. """ __all__ = ['LEDNet', 'lednet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, asym_conv3x3_block, ChannelShuffle,\ InterpolationBlock, Hourglass, BreakBlock from .enet import ENetMixDownBlock class LEDBranch(nn.Module): """ LEDNet encoder branch. Parameters: ---------- channels : int Number of input/output channels. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, dilation, dropout_rate, bn_eps): super(LEDBranch, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.conv1 = asym_conv3x3_block( channels=channels, bias=True, lw_use_bn=False, bn_eps=bn_eps) self.conv2 = asym_conv3x3_block( channels=channels, padding=dilation, dilation=dilation, bias=True, lw_use_bn=False, bn_eps=bn_eps, rw_activation=None) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) return x class LEDUnit(nn.Module): """ LEDNet encoder unit (Split-Shuffle-non-bottleneck). Parameters: ---------- channels : int Number of input/output channels. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, dilation, dropout_rate, bn_eps): super(LEDUnit, self).__init__() mid_channels = channels // 2 self.left_branch = LEDBranch( channels=mid_channels, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps) self.right_branch = LEDBranch( channels=mid_channels, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps) self.activ = nn.ReLU(inplace=True) self.shuffle = ChannelShuffle( channels=channels, groups=2) def forward(self, x): identity = x x1, x2 = torch.chunk(x, chunks=2, dim=1) x1 = self.left_branch(x1) x2 = self.right_branch(x2) x = torch.cat((x1, x2), dim=1) x = x + identity x = self.activ(x) x = self.shuffle(x) return x class PoolingBranch(nn.Module): """ Pooling branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. bn_eps : float Small float added to variance in Batch norm. in_size : tuple of 2 int or None Spatial size of input image. down_size : int Spatial size of downscaled image. """ def __init__(self, in_channels, out_channels, bias, bn_eps, in_size, down_size): super(PoolingBranch, self).__init__() self.in_size = in_size self.pool = nn.AdaptiveAvgPool2d(output_size=down_size) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, bn_eps=bn_eps) self.up = InterpolationBlock( scale_factor=None, out_size=in_size) def forward(self, x): in_size = self.in_size if self.in_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = self.up(x, in_size) return x class APN(nn.Module): """ Attention pyramid network block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. 
in_size : tuple of 2 int or None Spatial size of input image. """ def __init__(self, in_channels, out_channels, bn_eps, in_size): super(APN, self).__init__() self.in_size = in_size att_out_channels = 1 self.pool_branch = PoolingBranch( in_channels=in_channels, out_channels=out_channels, bias=True, bn_eps=bn_eps, in_size=in_size, down_size=1) self.body = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=True, bn_eps=bn_eps) down_seq = nn.Sequential() down_seq.add_module("down1", conv7x7_block( in_channels=in_channels, out_channels=att_out_channels, stride=2, bias=True, bn_eps=bn_eps)) down_seq.add_module("down2", conv5x5_block( in_channels=att_out_channels, out_channels=att_out_channels, stride=2, bias=True, bn_eps=bn_eps)) down3_subseq = nn.Sequential() down3_subseq.add_module("conv1", conv3x3_block( in_channels=att_out_channels, out_channels=att_out_channels, stride=2, bias=True, bn_eps=bn_eps)) down3_subseq.add_module("conv2", conv3x3_block( in_channels=att_out_channels, out_channels=att_out_channels, bias=True, bn_eps=bn_eps)) down_seq.add_module("down3", down3_subseq) up_seq = nn.Sequential() up = InterpolationBlock(scale_factor=2) up_seq.add_module("up1", up) up_seq.add_module("up2", up) up_seq.add_module("up3", up) skip_seq = nn.Sequential() skip_seq.add_module("skip1", BreakBlock()) skip_seq.add_module("skip2", conv7x7_block( in_channels=att_out_channels, out_channels=att_out_channels, bias=True, bn_eps=bn_eps)) skip_seq.add_module("skip3", conv5x5_block( in_channels=att_out_channels, out_channels=att_out_channels, bias=True, bn_eps=bn_eps)) self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq) def forward(self, x): y = self.pool_branch(x) w = self.hg(x) x = self.body(x) x = x * w x = x + y return x class LEDNet(nn.Module): """ LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. Parameters: ---------- channels : list of int Number of output channels for each unit. dilations : list of int Dilations for units. dropout_rates : list of list of int Dropout rates for each unit in encoder. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, dilations, dropout_rates, correct_size_mismatch=False, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(LEDNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.encoder = nn.Sequential() for i, dilations_per_stage in enumerate(dilations): out_channels = channels[i] dropout_rate = dropout_rates[i] stage = nn.Sequential() for j, dilation in enumerate(dilations_per_stage): if j == 0: stage.add_module("unit{}".format(j + 1), ENetMixDownBlock( in_channels=in_channels, out_channels=out_channels, bias=True, bn_eps=bn_eps, correct_size_mismatch=correct_size_mismatch)) in_channels = out_channels else: stage.add_module("unit{}".format(j + 1), LEDUnit( channels=in_channels, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps)) self.encoder.add_module("stage{}".format(i + 1), stage) self.apn = APN( in_channels=in_channels, out_channels=num_classes, bn_eps=bn_eps, in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None) self.up = InterpolationBlock( scale_factor=8, align_corners=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.encoder(x) x = self.apn(x) x = self.up(x) return x def get_lednet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create LEDNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [32, 64, 128] dilations = [[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]] dropout_rates = [0.03, 0.03, 0.3] bn_eps = 1e-3 net = LEDNet( channels=channels, dilations=dilations, dropout_rates=dropout_rates, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def lednet_cityscapes(num_classes=19, **kwargs): """ LEDNet model for Cityscapes from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1905.02423. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_lednet(num_classes=num_classes, model_name="lednet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True correct_size_mismatch = False in_size = (1024, 2048) classes = 19 models = [ lednet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, correct_size_mismatch=correct_size_mismatch) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != lednet_cityscapes or weight_count == 922821) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
13,638
29.241685
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/superpointnet.py
""" SuperPointNet for HPatches (image matching), implemented in PyTorch. Original paper: 'SuperPoint: Self-Supervised Interest Point Detection and Description,' https://arxiv.org/abs/1712.07629. """ __all__ = ['SuperPointNet', 'superpointnet'] import os import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from .common import conv1x1, conv3x3_block class SPHead(nn.Module): """ SuperPointNet head block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, mid_channels, out_channels): super(SPHead, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, bias=True, use_bn=False) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class SPDetector(nn.Module): """ SuperPointNet detector. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. conf_thresh : float, default 0.015 Confidence threshold. nms_dist : int, default 4 NMS distance. border_size : int, default 4 Image border size to remove points. reduction : int, default 8 Feature reduction factor. """ def __init__(self, in_channels, mid_channels, conf_thresh=0.015, nms_dist=4, border_size=4, reduction=8): super(SPDetector, self).__init__() self.conf_thresh = conf_thresh self.nms_dist = nms_dist self.border_size = border_size self.reduction = reduction num_classes = reduction * reduction + 1 self.detector = SPHead( in_channels=in_channels, mid_channels=mid_channels, out_channels=num_classes) def forward(self, x): batch = x.size(0) x_height, x_width = x.size()[-2:] img_height = x_height * self.reduction img_width = x_width * self.reduction semi = self.detector(x) dense = semi.softmax(dim=1) nodust = dense[:, :-1, :, :] heatmap = nodust.permute(0, 2, 3, 1) heatmap = heatmap.reshape((-1, x_height, x_width, self.reduction, self.reduction)) heatmap = heatmap.permute(0, 1, 3, 2, 4) heatmap = heatmap.reshape((-1, 1, x_height * self.reduction, x_width * self.reduction)) heatmap_mask = (heatmap >= self.conf_thresh) pad = self.nms_dist bord = self.border_size + pad heatmap_mask2 = F.pad(heatmap_mask, pad=(pad, pad, pad, pad)) pts_list = [] confs_list = [] for i in range(batch): heatmap_i = heatmap[i, 0] heatmap_mask_i = heatmap_mask[i, 0] heatmap_mask2_i = heatmap_mask2[i, 0] src_pts = torch.nonzero(heatmap_mask_i) src_confs = torch.masked_select(heatmap_i, heatmap_mask_i) src_inds = torch.argsort(src_confs, descending=True) dst_inds = torch.zeros_like(src_inds) dst_pts_count = 0 for ind_j in src_inds: pt = src_pts[ind_j] + pad assert (pad <= pt[0] < heatmap_mask2_i.shape[0] - pad) assert (pad <= pt[1] < heatmap_mask2_i.shape[1] - pad) assert (0 <= pt[0] - pad < img_height) assert (0 <= pt[1] - pad < img_width) if heatmap_mask2_i[pt[0], pt[1]] == 1: heatmap_mask2_i[(pt[0] - pad):(pt[0] + pad + 1), (pt[1] - pad):(pt[1] + pad + 1)] = 0 if (bord < pt[0] - pad <= img_height - bord) and (bord < pt[1] - pad <= img_width - bord): dst_inds[dst_pts_count] = ind_j dst_pts_count += 1 dst_inds = dst_inds[:dst_pts_count] dst_pts = torch.index_select(src_pts, dim=0, index=dst_inds) dst_confs = torch.index_select(src_confs, dim=0, index=dst_inds) pts_list.append(dst_pts) confs_list.append(dst_confs) return pts_list, confs_list class SPDescriptor(nn.Module): """ SuperPointNet descriptor 
generator. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. descriptor_length : int, default 256 Descriptor length. transpose_descriptors : bool, default True Whether transpose descriptors with respect to points. reduction : int, default 8 Feature reduction factor. """ def __init__(self, in_channels, mid_channels, descriptor_length=256, transpose_descriptors=True, reduction=8): super(SPDescriptor, self).__init__() self.desc_length = descriptor_length self.transpose_descriptors = transpose_descriptors self.reduction = reduction self.head = SPHead( in_channels=in_channels, mid_channels=mid_channels, out_channels=descriptor_length) def forward(self, x, pts_list): x_height, x_width = x.size()[-2:] coarse_desc_map = self.head(x) coarse_desc_map = F.normalize(coarse_desc_map) descriptors_list = [] for i, pts in enumerate(pts_list): pts = pts.float() pts[:, 0] = pts[:, 0] / (0.5 * x_height * self.reduction) - 1.0 pts[:, 1] = pts[:, 1] / (0.5 * x_width * self.reduction) - 1.0 if self.transpose_descriptors: pts = torch.index_select(pts, dim=1, index=torch.tensor([1, 0], device=pts.device)) pts = pts.unsqueeze(0).unsqueeze(0) descriptors = F.grid_sample(coarse_desc_map[i:(i + 1)], pts) descriptors = descriptors.squeeze(0).squeeze(1) descriptors = descriptors.transpose(0, 1) descriptors = F.normalize(descriptors) descriptors_list.append(descriptors) return descriptors_list class SuperPointNet(nn.Module): """ SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,' https://arxiv.org/abs/1712.07629. Parameters: ---------- channels : list of list of int Number of output channels for each unit. final_block_channels : int Number of output channels for the final units. transpose_descriptors : bool, default True Whether transpose descriptors with respect to points. in_channels : int, default 1 Number of input channels. """ def __init__(self, channels, final_block_channels, transpose_descriptors=True, in_channels=1): super(SuperPointNet, self).__init__() self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): stage.add_module("reduce{}".format(i + 1), nn.MaxPool2d( kernel_size=2, stride=2)) stage.add_module("unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=True, use_bn=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.detector = SPDetector( in_channels=in_channels, mid_channels=final_block_channels) self.descriptor = SPDescriptor( in_channels=in_channels, mid_channels=final_block_channels, transpose_descriptors=transpose_descriptors) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): assert (x.size(1) == 1) x = self.features(x) pts_list, confs_list = self.detector(x) descriptors_list = self.descriptor(x, pts_list) return pts_list, confs_list, descriptors_list def get_superpointnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SuperPointNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels_per_layers = [64, 64, 128, 128] layers = [2, 2, 2, 2] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] final_block_channels = 256 net = SuperPointNet( channels=channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def superpointnet(**kwargs): """ SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,' https://arxiv.org/abs/1712.07629. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_superpointnet(model_name="superpointnet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ superpointnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != superpointnet or weight_count == 1300865) # x = torch.randn(1, 1, 224, 224) x = torch.randn(1, 1, 1000, 2000) y = net(x) # y.sum().backward() assert (len(y) == 3) if __name__ == "__main__": _test()
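# --- Editor's usage sketch (not part of the original module) ---
# Unpacking the per-image outputs of SuperPointNet.forward(): keypoints come
# back as (row, col) integer coordinates from torch.nonzero(), confidences as
# the corresponding heatmap values, and descriptors as L2-normalized vectors.
def _example_keypoints():
    net = superpointnet(pretrained=False)
    net.eval()
    x = torch.randn(1, 1, 480, 640)  # single grayscale image, dims divisible by 8
    pts_list, confs_list, descs_list = net(x)
    pts = pts_list[0]      # (num_pts, 2), in (row, col) order
    confs = confs_list[0]  # (num_pts,)
    descs = descs_list[0]  # (num_pts, 256), L2-normalized descriptors
    assert (pts.size(0) == confs.size(0) == descs.size(0))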
11,418
31.719198
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ibndensenet.py
""" IBN-DenseNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. """ __all__ = ['IBNDenseNet', 'ibn_densenet121', 'ibn_densenet161', 'ibn_densenet169', 'ibn_densenet201'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import pre_conv3x3_block, IBN from .preresnet import PreResInitBlock, PreResActivation from .densenet import TransitionBlock class IBNPreConvBlock(nn.Module): """ IBN-Net specific convolution block with BN/IBN normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. use_ibn : bool, default False Whether use Instance-Batch Normalization. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_ibn=False, return_preact=False): super(IBNPreConvBlock, self).__init__() self.use_ibn = use_ibn self.return_preact = return_preact if self.use_ibn: self.ibn = IBN( channels=in_channels, first_fraction=0.6, inst_first=False) else: self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) def forward(self, x): if self.use_ibn: x = self.ibn(x) else: x = self.bn(x) x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def ibn_pre_conv1x1_block(in_channels, out_channels, stride=1, use_ibn=False, return_preact=False): """ 1x1 version of the IBN-Net specific pre-activated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. use_ibn : bool, default False Whether use Instance-Batch Normalization. return_preact : bool, default False Whether return pre-activation. """ return IBNPreConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, use_ibn=use_ibn, return_preact=return_preact) class IBNDenseUnit(nn.Module): """ IBN-DenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, dropout_rate, conv1_ibn): super(IBNDenseUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bn_size self.conv1 = ibn_pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, use_ibn=conv1_ibn) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=inc_channels) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) x = torch.cat((identity, x), dim=1) return x class IBNDenseNet(nn.Module): """ IBN-DenseNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, dropout_rate=0.0, in_channels=3, in_size=(224, 224), num_classes=1000): super(IBNDenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): conv1_ibn = (i < 3) and (j % 3 == 0) stage.add_module("unit{}".format(j + 1), IBNDenseUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, conv1_ibn=conv1_ibn)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_ibndensenet(num_layers, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IBN-DenseNet model with specific parameters. Parameters: ---------- num_layers : int Number of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if num_layers == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif num_layers == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif num_layers == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif num_layers == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported IBN-DenseNet version with number of layers {}".format(num_layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = IBNDenseNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ibn_densenet121(**kwargs): """ IBN-DenseNet-121 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibndensenet(num_layers=121, model_name="ibn_densenet121", **kwargs) def ibn_densenet161(**kwargs): """ IBN-DenseNet-161 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibndensenet(num_layers=161, model_name="ibn_densenet161", **kwargs) def ibn_densenet169(**kwargs): """ IBN-DenseNet-169 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibndensenet(num_layers=169, model_name="ibn_densenet169", **kwargs) def ibn_densenet201(**kwargs): """ IBN-DenseNet-201 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_ibndensenet(num_layers=201, model_name="ibn_densenet201", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ ibn_densenet121, ibn_densenet161, ibn_densenet169, ibn_densenet201, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibn_densenet121 or weight_count == 7978856) assert (model != ibn_densenet161 or weight_count == 28681000) assert (model != ibn_densenet169 or weight_count == 14149480) assert (model != ibn_densenet201 or weight_count == 20013928) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,647
30.384615
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/hardnet.py
""" HarDNet for ImageNet-1K, implemented in PyTorch. Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. """ __all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block class InvDwsConvBlock(nn.Module): """ Inverse depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, pw_activation=(lambda: nn.ReLU(inplace=True)), dw_activation=(lambda: nn.ReLU(inplace=True))): super(InvDwsConvBlock, self).__init__() self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=pw_activation) self.dw_conv = dwconv_block( in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=dw_activation) def forward(self, x): x = self.pw_conv(x) x = self.dw_conv(x) return x def invdwsconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, bn_eps=1e-5, pw_activation=(lambda: nn.ReLU(inplace=True)), dw_activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 inverse depthwise separable version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. """ return InvDwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, bn_eps=bn_eps, pw_activation=pw_activation, dw_activation=dw_activation) class HarDUnit(nn.Module): """ HarDNet unit. Parameters: ---------- in_channels_list : list of int Number of input channels for each block. out_channels_list : list of int Number of output channels for each block. 
    links_list : list of list of int
        List of indices for each layer.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_dropout : bool
        Whether to use dropout module.
    downsampling : bool
        Whether to downsample input.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 links_list,
                 use_deptwise,
                 use_dropout,
                 downsampling,
                 activation):
        super(HarDUnit, self).__init__()
        self.links_list = links_list
        self.use_dropout = use_dropout
        self.downsampling = downsampling

        self.blocks = nn.Sequential()
        for i in range(len(links_list)):
            in_channels = in_channels_list[i]
            out_channels = out_channels_list[i]
            if use_deptwise:
                unit = invdwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    pw_activation=activation,
                    dw_activation=None)
            else:
                unit = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels)
            self.blocks.add_module("block{}".format(i + 1), unit)

        if self.use_dropout:
            self.dropout = nn.Dropout(p=0.1)
        self.conv = conv1x1_block(
            in_channels=in_channels_list[-1],
            out_channels=out_channels_list[-1],
            activation=activation)

        if self.downsampling:
            if use_deptwise:
                self.downsample = dwconv3x3_block(
                    in_channels=out_channels_list[-1],
                    out_channels=out_channels_list[-1],
                    stride=2,
                    activation=None)
            else:
                self.downsample = nn.MaxPool2d(
                    kernel_size=2,
                    stride=2)

    def forward(self, x):
        layer_outs = [x]
        for links_i, layer_i in zip(self.links_list, self.blocks._modules.values()):
            layer_in = []
            for idx_ij in links_i:
                layer_in.append(layer_outs[idx_ij])
            if len(layer_in) > 1:
                x = torch.cat(layer_in, dim=1)
            else:
                x = layer_in[0]
            out = layer_i(x)
            layer_outs.append(out)

        outs = []
        for i, layer_out_i in enumerate(layer_outs):
            if (i == len(layer_outs) - 1) or (i % 2 == 1):
                outs.append(layer_out_i)
        x = torch.cat(outs, dim=1)

        if self.use_dropout:
            x = self.dropout(x)
        x = self.conv(x)

        if self.downsampling:
            x = self.downsample(x)
        return x


class HarDInitBlock(nn.Module):
    """
    HarDNet specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_deptwise,
                 activation):
        super(HarDInitBlock, self).__init__()
        mid_channels = out_channels // 2

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=2,
            activation=activation)
        conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block
        self.conv2 = conv2_block_class(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=activation)
        if use_deptwise:
            self.downsample = dwconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                stride=2,
                activation=None)
        else:
            self.downsample = nn.MaxPool2d(
                kernel_size=3,
                stride=2,
                padding=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.downsample(x)
        return x


class HarDNet(nn.Module):
    """
    HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    init_block_channels : int
        Number of output channels for the initial unit.
    unit_in_channels : list of list of list of int
        Number of input channels for each layer in each stage.
    unit_out_channels : list of list of list of int
        Number of output channels for each layer in each stage.
    unit_links : list of list of list of int
        List of indices for each layer in each stage.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_last_dropout : bool
        Whether to use dropouts in the last unit.
    output_dropout_rate : float
        Parameter of Dropout layer before classifier. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 init_block_channels,
                 unit_in_channels,
                 unit_out_channels,
                 unit_links,
                 use_deptwise,
                 use_last_dropout,
                 output_dropout_rate,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(HarDNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        activation = "relu6"

        self.features = nn.Sequential()
        self.features.add_module("init_block", HarDInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            use_deptwise=use_deptwise,
            activation=activation))
        for i, (in_channels_list_i, out_channels_list_i) in\
                enumerate(zip(unit_in_channels, unit_out_channels)):
            stage = nn.Sequential()
            for j, (in_channels_list_ij, out_channels_list_ij) in\
                    enumerate(zip(in_channels_list_i, out_channels_list_i)):
                use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
                               use_last_dropout)
                downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
                stage.add_module("unit{}".format(j + 1), HarDUnit(
                    in_channels_list=in_channels_list_ij,
                    out_channels_list=out_channels_list_ij,
                    links_list=unit_links[i][j],
                    use_deptwise=use_deptwise,
                    use_dropout=use_dropout,
                    downsampling=downsampling,
                    activation=activation))
            self.features.add_module("stage{}".format(i + 1), stage)
        in_channels = unit_out_channels[-1][-1][-1]
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Sequential()
        self.output.add_module("dropout", nn.Dropout(p=output_dropout_rate))
        self.output.add_module("fc", nn.Linear(
            in_features=in_channels,
            out_features=num_classes))

        self._init_params()

    def _init_params(self):
        # `named_modules` yields (name, module) pairs, so the tuple must be
        # unpacked before the isinstance checks can ever match.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x


def get_hardnet(blocks,
                use_deptwise=True,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".torch", "models"),
                **kwargs):
    """
    Create HarDNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_deptwise : bool, default True
        Whether to use depthwise separable version of the model.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
""" if blocks == 39: init_block_channels = 48 growth_factor = 1.6 dropout_rate = 0.05 if use_deptwise else 0.1 layers = [4, 16, 8, 4] channels_per_layers = [96, 320, 640, 1024] growth_rates = [16, 20, 64, 160] downsamples = [1, 1, 1, 0] use_dropout = False elif blocks == 68: init_block_channels = 64 growth_factor = 1.7 dropout_rate = 0.05 if use_deptwise else 0.1 layers = [8, 16, 16, 16, 4] channels_per_layers = [128, 256, 320, 640, 1024] growth_rates = [14, 16, 20, 40, 160] downsamples = [1, 0, 1, 1, 0] use_dropout = False elif blocks == 85: init_block_channels = 96 growth_factor = 1.7 dropout_rate = 0.05 if use_deptwise else 0.2 layers = [8, 16, 16, 16, 16, 4] channels_per_layers = [192, 256, 320, 480, 720, 1280] growth_rates = [24, 24, 28, 36, 48, 256] downsamples = [1, 0, 1, 0, 1, 0] use_dropout = True else: raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks)) assert (downsamples[-1] == 0) def calc_stage_params(): def calc_unit_params(): def calc_blocks_params(layer_idx, base_channels, growth_rate): if layer_idx == 0: return base_channels, 0, [] out_channels_ij = growth_rate links_ij = [] for k in range(10): dv = 2 ** k if layer_idx % dv == 0: t = layer_idx - dv links_ij.append(t) if k > 0: out_channels_ij *= growth_factor out_channels_ij = int(int(out_channels_ij + 1) / 2) * 2 in_channels_ij = 0 for t in links_ij: out_channels_ik, _, _ = calc_blocks_params( layer_idx=t, base_channels=base_channels, growth_rate=growth_rate) in_channels_ij += out_channels_ik return out_channels_ij, in_channels_ij, links_ij unit_out_channels = [] unit_in_channels = [] unit_links = [] for num_layers, growth_rate, base_channels, channels_per_layers_i in zip( layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers): stage_out_channels_i = 0 unit_out_channels_i = [] unit_in_channels_i = [] unit_links_i = [] for j in range(num_layers): out_channels_ij, in_channels_ij, links_ij = calc_blocks_params( layer_idx=(j + 1), base_channels=base_channels, growth_rate=growth_rate) unit_out_channels_i.append(out_channels_ij) unit_in_channels_i.append(in_channels_ij) unit_links_i.append(links_ij) if (j % 2 == 0) or (j == num_layers - 1): stage_out_channels_i += out_channels_ij unit_in_channels_i.append(stage_out_channels_i) unit_out_channels_i.append(channels_per_layers_i) unit_out_channels.append(unit_out_channels_i) unit_in_channels.append(unit_in_channels_i) unit_links.append(unit_links_i) return unit_out_channels, unit_in_channels, unit_links unit_out_channels, unit_in_channels, unit_links = calc_unit_params() stage_out_channels = [] stage_in_channels = [] stage_links = [] stage_out_channels_k = None for i in range(len(layers)): if stage_out_channels_k is None: stage_out_channels_k = [] stage_in_channels_k = [] stage_links_k = [] stage_out_channels_k.append(unit_out_channels[i]) stage_in_channels_k.append(unit_in_channels[i]) stage_links_k.append(unit_links[i]) if (downsamples[i] == 1) or (i == len(layers) - 1): stage_out_channels.append(stage_out_channels_k) stage_in_channels.append(stage_in_channels_k) stage_links.append(stage_links_k) stage_out_channels_k = None return stage_out_channels, stage_in_channels, stage_links stage_out_channels, stage_in_channels, stage_links = calc_stage_params() net = HarDNet( init_block_channels=init_block_channels, unit_in_channels=stage_in_channels, unit_out_channels=stage_out_channels, unit_links=stage_links, use_deptwise=use_deptwise, use_last_dropout=use_dropout, output_dropout_rate=dropout_rate, **kwargs) if 
pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def hardnet39ds(**kwargs): """ HarDNet-39DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hardnet(blocks=39, use_deptwise=True, model_name="hardnet39ds", **kwargs) def hardnet68ds(**kwargs): """ HarDNet-68DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hardnet(blocks=68, use_deptwise=True, model_name="hardnet68ds", **kwargs) def hardnet68(**kwargs): """ HarDNet-68 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hardnet(blocks=68, use_deptwise=False, model_name="hardnet68", **kwargs) def hardnet85(**kwargs): """ HarDNet-85 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_hardnet(blocks=85, use_deptwise=False, model_name="hardnet85", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ hardnet39ds, hardnet68ds, hardnet68, hardnet85, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != hardnet39ds or weight_count == 3488228) assert (model != hardnet68ds or weight_count == 4180602) assert (model != hardnet68 or weight_count == 17565348) assert (model != hardnet85 or weight_count == 36670212) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
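
# The harmonic connection rule buried in `calc_blocks_params` above is easier
# to read in isolation. A minimal standalone sketch of the same arithmetic
# (illustrative only, not part of the library API): layer j links back to
# layers j - 2**k for every k with j % 2**k == 0, and its width is multiplied
# by `growth_factor` once per extra link, then rounded to an even channel count.

def hard_links(layer_idx, growth_rate, growth_factor=1.6):
    """Return (out_channels, links) for 1-based layer index `layer_idx`."""
    out_channels = growth_rate
    links = []
    for k in range(10):
        dv = 2 ** k
        if layer_idx % dv == 0:
            links.append(layer_idx - dv)
            if k > 0:
                out_channels *= growth_factor
    out_channels = int(int(out_channels + 1) / 2) * 2  # round to even, as above
    return out_channels, links

for j in range(1, 9):
    print(j, hard_links(j, growth_rate=16))
# layer 8 -> (66, [7, 6, 4, 0]): a layer aggregates its immediate predecessor
# plus exponentially older layers, which is what keeps memory traffic low.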
21,984
34.176
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sinet.py
""" SINet for image segmentation, implemented in PyTorch. Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. """ __all__ = ['SINet', 'sinet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, get_activation_layer, conv1x1_block, conv3x3_block, round_channels, dwconv_block,\ Concurrent, InterpolationBlock, ChannelShuffle class SEBlock(nn.Module): """ SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : int Number of channels. reduction : int, default 16 Squeeze reduction value. round_mid : bool, default False Whether to round middle channel number (make divisible by 8). activation : function, or str, or nn.Module, default 'relu' Activation function after the first convolution. out_activation : function, or str, or nn.Module, default 'sigmoid' Activation function after the last convolution. """ def __init__(self, channels, reduction=16, round_mid=False, mid_activation=(lambda: nn.ReLU(inplace=True)), out_activation=(lambda: nn.Sigmoid())): super(SEBlock, self).__init__() self.use_conv2 = (reduction > 1) mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction) self.pool = nn.AdaptiveAvgPool2d(output_size=1) self.fc1 = nn.Linear( in_features=channels, out_features=mid_channels) if self.use_conv2: self.activ = get_activation_layer(mid_activation) self.fc2 = nn.Linear( in_features=mid_channels, out_features=channels) self.sigmoid = get_activation_layer(out_activation) def forward(self, x): w = self.pool(x) w = w.squeeze(dim=-1).squeeze(dim=-1) w = self.fc1(w) if self.use_conv2: w = self.activ(w) w = self.fc2(w) w = self.sigmoid(w) w = w.unsqueeze(dim=-1).unsqueeze(dim=-1) x = x * w return x class DwsConvBlock(nn.Module): """ SINet version of depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. se_reduction : int, default 0 Squeeze reduction value (0 means no-se). 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: nn.ReLU(inplace=True)), pw_activation=(lambda: nn.ReLU(inplace=True)), se_reduction=0): super(DwsConvBlock, self).__init__() self.use_se = (se_reduction > 0) self.dw_conv = dwconv_block( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=dw_use_bn, bn_eps=bn_eps, activation=dw_activation) if self.use_se: self.se = SEBlock( channels=in_channels, reduction=se_reduction, round_mid=False, mid_activation=(lambda: nn.PReLU(in_channels // se_reduction)), out_activation=(lambda: nn.PReLU(in_channels))) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=pw_use_bn, bn_eps=bn_eps, activation=pw_activation) def forward(self, x): x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) return x def dwsconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5, dw_activation=(lambda: nn.ReLU(inplace=True)), pw_activation=(lambda: nn.ReLU(inplace=True)), se_reduction=0): """ 3x3 depthwise separable version of the standard convolution block (SINet version). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. dw_use_bn : bool, default True Whether to use BatchNorm layer (depthwise convolution block). pw_use_bn : bool, default True Whether to use BatchNorm layer (pointwise convolution block). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. dw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the depthwise convolution block. pw_activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the pointwise convolution block. se_reduction : int, default 0 Squeeze reduction value (0 means no-se). """ return DwsConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, dw_use_bn=dw_use_bn, pw_use_bn=pw_use_bn, bn_eps=bn_eps, dw_activation=dw_activation, pw_activation=pw_activation, se_reduction=se_reduction) def dwconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 depthwise version of the standard convolution block (SINet version). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. 
""" return dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, bn_eps=bn_eps, activation=activation) class FDWConvBlock(nn.Module): """ Factorized depthwise separable convolution block with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function after the each convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(FDWConvBlock, self).__init__() assert use_bn self.activate = (activation is not None) self.v_conv = dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=(kernel_size, 1), stride=stride, padding=(padding, 0), dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=None) self.h_conv = dwconv_block( in_channels=in_channels, out_channels=out_channels, kernel_size=(1, kernel_size), stride=stride, padding=(0, padding), dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=None) if self.activate: self.act = get_activation_layer(activation) def forward(self, x): x = self.v_conv(x) + self.h_conv(x) if self.activate: x = self.act(x) return x def fdwconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 factorized depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return FDWConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def fdwconv5x5_block(in_channels, out_channels, stride=1, padding=2, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 5x5 factorized depthwise version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int, default 1 Padding value for convolution layer. 
dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return FDWConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=5, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class SBBlock(nn.Module): """ SB-block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int Convolution window size for a factorized depthwise separable convolution block. scale_factor : int Scale factor. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, kernel_size, scale_factor, bn_eps): super(SBBlock, self).__init__() self.use_scale = (scale_factor > 1) if self.use_scale: self.down_scale = nn.AvgPool2d( kernel_size=scale_factor, stride=scale_factor) self.up_scale = InterpolationBlock(scale_factor=scale_factor) use_fdw = (scale_factor > 0) if use_fdw: fdwconv3x3_class = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block self.conv1 = fdwconv3x3_class( in_channels=in_channels, out_channels=in_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(in_channels))) else: self.conv1 = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(in_channels))) self.conv2 = conv1x1( in_channels=in_channels, out_channels=out_channels) self.bn = nn.BatchNorm2d( num_features=out_channels, eps=bn_eps) def forward(self, x): if self.use_scale: x = self.down_scale(x) x = self.conv1(x) x = self.conv2(x) if self.use_scale: x = self.up_scale(x) x = self.bn(x) return x class PreActivation(nn.Module): """ PreResNet like pure pre-activation block without convolution layer. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, bn_eps): super(PreActivation, self).__init__() self.bn = nn.BatchNorm2d( num_features=in_channels, eps=bn_eps) self.activ = nn.PReLU(num_parameters=in_channels) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class ESPBlock(nn.Module): """ ESP block, which is based on the following principle: Reduce ---> Split ---> Transform --> Merge. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_sizes : list of int Convolution window size for branches. scale_factors : list of int Scale factor for branches. use_residual : bool Whether to use residual connection. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors, use_residual, bn_eps): super(ESPBlock, self).__init__() self.use_residual = use_residual groups = len(kernel_sizes) mid_channels = int(out_channels / groups) res_channels = out_channels - groups * mid_channels self.conv = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=groups) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.branches = Concurrent() for i in range(groups): out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels self.branches.add_module("branch{}".format(i + 1), SBBlock( in_channels=mid_channels, out_channels=out_channels_i, kernel_size=kernel_sizes[i], scale_factor=scale_factors[i], bn_eps=bn_eps)) self.preactiv = PreActivation( in_channels=out_channels, bn_eps=bn_eps) def forward(self, x): if self.use_residual: identity = x x = self.conv(x) x = self.c_shuffle(x) x = self.branches(x) if self.use_residual: x = identity + x x = self.preactiv(x) return x class SBStage(nn.Module): """ SB stage. Parameters: ---------- in_channels : int Number of input channels. down_channels : int Number of output channels for a downscale block. channels_list : list of int Number of output channels for all residual block. kernel_sizes_list : list of int Convolution window size for branches. scale_factors_list : list of int Scale factor for branches. use_residual_list : list of int List of flags for using residual in each ESP-block. se_reduction : int Squeeze reduction value (0 means no-se). bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, down_channels, channels_list, kernel_sizes_list, scale_factors_list, use_residual_list, se_reduction, bn_eps): super(SBStage, self).__init__() self.down_conv = dwsconv3x3_block( in_channels=in_channels, out_channels=down_channels, stride=2, dw_use_bn=False, bn_eps=bn_eps, dw_activation=None, pw_activation=(lambda: nn.PReLU(down_channels)), se_reduction=se_reduction) in_channels = down_channels self.main_branch = nn.Sequential() for i, out_channels in enumerate(channels_list): use_residual = (use_residual_list[i] == 1) kernel_sizes = kernel_sizes_list[i] scale_factors = scale_factors_list[i] self.main_branch.add_module("block{}".format(i + 1), ESPBlock( in_channels=in_channels, out_channels=out_channels, kernel_sizes=kernel_sizes, scale_factors=scale_factors, use_residual=use_residual, bn_eps=bn_eps)) in_channels = out_channels self.preactiv = PreActivation( in_channels=(down_channels + in_channels), bn_eps=bn_eps) def forward(self, x): x = self.down_conv(x) y = self.main_branch(x) x = torch.cat((x, y), dim=1) x = self.preactiv(x) return x, y class SBEncoderInitBlock(nn.Module): """ SB encoder specific initial block. Parameters: ---------- in_channels : int Number of input channels. mid_channels : int Number of middle channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, in_channels, mid_channels, out_channels, bn_eps): super(SBEncoderInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid_channels))) self.conv2 = dwsconv3x3_block( in_channels=mid_channels, out_channels=out_channels, stride=2, dw_use_bn=False, bn_eps=bn_eps, dw_activation=None, pw_activation=(lambda: nn.PReLU(out_channels)), se_reduction=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class SBEncoder(nn.Module): """ SB encoder for SINet. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of input channels. init_block_channels : list int Number of output channels for convolutions in the initial block. down_channels_list : list of int Number of downsample channels for each residual block. channels_list : list of list of int Number of output channels for all residual block. kernel_sizes_list : list of list of int Convolution window size for each residual block. scale_factors_list : list of list of int Scale factor for each residual block. use_residual_list : list of list of int List of flags for using residual in each residual block. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, init_block_channels, down_channels_list, channels_list, kernel_sizes_list, scale_factors_list, use_residual_list, bn_eps): super(SBEncoder, self).__init__() self.init_block = SBEncoderInitBlock( in_channels=in_channels, mid_channels=init_block_channels[0], out_channels=init_block_channels[1], bn_eps=bn_eps) in_channels = init_block_channels[1] self.stage1 = SBStage( in_channels=in_channels, down_channels=down_channels_list[0], channels_list=channels_list[0], kernel_sizes_list=kernel_sizes_list[0], scale_factors_list=scale_factors_list[0], use_residual_list=use_residual_list[0], se_reduction=1, bn_eps=bn_eps) in_channels = down_channels_list[0] + channels_list[0][-1] self.stage2 = SBStage( in_channels=in_channels, down_channels=down_channels_list[1], channels_list=channels_list[1], kernel_sizes_list=kernel_sizes_list[1], scale_factors_list=scale_factors_list[1], use_residual_list=use_residual_list[1], se_reduction=2, bn_eps=bn_eps) in_channels = down_channels_list[1] + channels_list[1][-1] self.output = conv1x1( in_channels=in_channels, out_channels=out_channels) def forward(self, x): y1 = self.init_block(x) x, y2 = self.stage1(y1) x, _ = self.stage2(x) x = self.output(x) return x, y2, y1 class SBDecodeBlock(nn.Module): """ SB decoder block for SINet. Parameters: ---------- channels : int Number of output classes. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, bn_eps): super(SBDecodeBlock, self).__init__() self.up = InterpolationBlock( scale_factor=2, align_corners=False) self.bn = nn.BatchNorm2d( num_features=channels, eps=bn_eps) self.conf = nn.Softmax2d() def forward(self, x, y): x = self.up(x) x = self.bn(x) w_conf = self.conf(x) w_max = (torch.max(w_conf, dim=1)[0]).unsqueeze(1).expand_as(x) x = y * (1 - w_max) + x return x class SBDecoder(nn.Module): """ SB decoder for SINet. Parameters: ---------- dim2 : int Size of dimension #2. num_classes : int Number of segmentation classes. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, dim2, num_classes, bn_eps): super(SBDecoder, self).__init__() self.decode1 = SBDecodeBlock( channels=num_classes, bn_eps=bn_eps) self.decode2 = SBDecodeBlock( channels=num_classes, bn_eps=bn_eps) self.conv3c = conv1x1_block( in_channels=dim2, out_channels=num_classes, bn_eps=bn_eps, activation=(lambda: nn.PReLU(num_classes))) self.output = nn.ConvTranspose2d( in_channels=num_classes, out_channels=num_classes, kernel_size=2, stride=2, padding=0, output_padding=0, bias=False) self.up = InterpolationBlock(scale_factor=2) def forward(self, y3, y2, y1): y2 = self.conv3c(y2) x = self.decode1(y3, y2) x = self.decode2(x, y1) x = self.output(x) x = self.up(x) return x class SINet(nn.Module): """ SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. Parameters: ---------- down_channels_list : list of int Number of downsample channels for each residual block. channels_list : list of list of int Number of output channels for all residual block. kernel_sizes_list : list of list of int Convolution window size for each residual block. scale_factors_list : list of list of int Scale factor for each residual block. use_residual_list : list of list of int List of flags for using residual in each residual block. dim2 : int Size of dimension #2. bn_eps : float Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 21 Number of segmentation classes. """ def __init__(self, down_channels_list, channels_list, kernel_sizes_list, scale_factors_list, use_residual_list, dim2, bn_eps, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=21): super(SINet, self).__init__() assert (fixed_size is not None) assert (in_channels > 0) assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux init_block_channels = [16, num_classes] out_channels = num_classes self.encoder = SBEncoder( in_channels=in_channels, out_channels=out_channels, init_block_channels=init_block_channels, down_channels_list=down_channels_list, channels_list=channels_list, kernel_sizes_list=kernel_sizes_list, scale_factors_list=scale_factors_list, use_residual_list=use_residual_list, bn_eps=bn_eps) self.decoder = SBDecoder( dim2=dim2, num_classes=num_classes, bn_eps=bn_eps) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): y3, y2, y1 = self.encoder(x) x = self.decoder(y3, y2, y1) if self.aux: return x, y3 else: return x def get_sinet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SINet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" kernel_sizes_list = [ [[3, 5], [3, 3], [3, 3]], [[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]] scale_factors_list = [ [[1, 1], [0, 1], [0, 1]], [[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]] chnn = 4 dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)] dim1 = dims[0] dim2 = dims[1] dim3 = dims[2] dim4 = dims[3] p = len(kernel_sizes_list[0]) q = len(kernel_sizes_list[1]) channels_list = [[dim2] * p, ([dim3] * (q // 2)) + ([dim4] * (q - q // 2))] use_residual_list = [[0] + ([1] * (p - 1)), [0] + ([1] * (q // 2 - 1)) + [0] + ([1] * (q - q // 2 - 1))] down_channels_list = [dim1, dim2] net = SINet( down_channels_list=down_channels_list, channels_list=channels_list, kernel_sizes_list=kernel_sizes_list, scale_factors_list=scale_factors_list, use_residual_list=use_residual_list, dim2=dims[1], **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sinet_cityscapes(num_classes=19, **kwargs): """ SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sinet(num_classes=num_classes, bn_eps=1e-3, model_name="sinet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (1024, 2048) aux = False fixed_size = True pretrained = False models = [ sinet_cityscapes, ] for model in models: net = model(pretrained=pretrained, aux=aux, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sinet_cityscapes or weight_count == 119418) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys # y.sum().backward() assert (tuple(y.size()) == (batch, 19, in_size[0], in_size[1])) if __name__ == "__main__": _test()
33,876
30.929312
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/shufflenetv2b.py
""" ShuffleNet V2 for ImageNet-1K, implemented in PyTorch. The alternative version. Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. """ __all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock class ShuffleUnit(nn.Module): """ ShuffleNetV2(b) unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. downsample : bool Whether do downsample. use_se : bool Whether to use SE block. use_residual : bool Whether to use residual connection. shuffle_group_first : bool Whether to use channel shuffle in group first mode. """ def __init__(self, in_channels, out_channels, downsample, use_se, use_residual, shuffle_group_first): super(ShuffleUnit, self).__init__() self.downsample = downsample self.use_se = use_se self.use_residual = use_residual mid_channels = out_channels // 2 in_channels2 = in_channels // 2 assert (in_channels % 2 == 0) y2_in_channels = (in_channels if downsample else in_channels2) y2_out_channels = out_channels - y2_in_channels self.conv1 = conv1x1_block( in_channels=y2_in_channels, out_channels=mid_channels) self.dconv = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(2 if self.downsample else 1), activation=None) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=y2_out_channels) if self.use_se: self.se = SEBlock(channels=y2_out_channels) if downsample: self.shortcut_dconv = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, stride=2, activation=None) self.shortcut_conv = conv1x1_block( in_channels=in_channels, out_channels=in_channels) if shuffle_group_first: self.c_shuffle = ChannelShuffle( channels=out_channels, groups=2) else: self.c_shuffle = ChannelShuffle2( channels=out_channels, groups=2) def forward(self, x): if self.downsample: y1 = self.shortcut_dconv(x) y1 = self.shortcut_conv(y1) x2 = x else: y1, x2 = torch.chunk(x, chunks=2, dim=1) y2 = self.conv1(x2) y2 = self.dconv(y2) y2 = self.conv2(y2) if self.use_se: y2 = self.se(y2) if self.use_residual and not self.downsample: y2 = y2 + x2 x = torch.cat((y1, y2), dim=1) x = self.c_shuffle(x) return x class ShuffleInitBlock(nn.Module): """ ShuffleNetV2(b) specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ShuffleInitBlock, self).__init__() self.conv = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1, ceil_mode=False) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ShuffleNetV2b(nn.Module): """ ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. use_se : bool, default False Whether to use SE block. use_residual : bool, default False Whether to use residual connections. 
shuffle_group_first : bool, default True Whether to use channel shuffle in group first mode. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, shuffle_group_first=True, in_channels=3, in_size=(224, 224), num_classes=1000): super(ShuffleNetV2b, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ShuffleInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) stage.add_module("unit{}".format(j + 1), ShuffleUnit( in_channels=in_channels, out_channels=out_channels, downsample=downsample, use_se=use_se, use_residual=use_residual, shuffle_group_first=shuffle_group_first)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_shufflenetv2b(width_scale, shuffle_group_first=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ShuffleNetV2(b) model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. shuffle_group_first : bool, default True Whether to use channel shuffle in group first mode. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 24 final_block_channels = 1024 layers = [4, 8, 4] channels_per_layers = [116, 232, 464] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] if width_scale > 1.5: final_block_channels = int(final_block_channels * width_scale) net = ShuffleNetV2b( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, shuffle_group_first=shuffle_group_first, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shufflenetv2b_wd2(**kwargs): """ ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(12.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_wd2", **kwargs) def shufflenetv2b_w1(**kwargs): """ ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=1.0, shuffle_group_first=True, model_name="shufflenetv2b_w1", **kwargs) def shufflenetv2b_w3d2(**kwargs): """ ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(44.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_w3d2", **kwargs) def shufflenetv2b_w2(**kwargs): """ ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shufflenetv2b( width_scale=(61.0 / 29.0), shuffle_group_first=True, model_name="shufflenetv2b_w2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ shufflenetv2b_wd2, shufflenetv2b_w1, shufflenetv2b_w3d2, shufflenetv2b_w2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shufflenetv2b_wd2 or weight_count == 1366792) assert (model != shufflenetv2b_w1 or weight_count == 2279760) assert (model != shufflenetv2b_w3d2 or weight_count == 4410194) assert (model != shufflenetv2b_w2 or weight_count == 7611290) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
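
# The split/shuffle pattern in `ShuffleUnit.forward` above relies on channel
# shuffle being a plain (groups, channels_per_group) transpose. A minimal
# sketch of the group-first variant (assumed to match the semantics of
# `ChannelShuffle` from .common; illustrative only):

import torch

def channel_shuffle(x, groups):
    batch, channels, height, width = x.size()
    channels_per_group = channels // groups
    x = x.view(batch, groups, channels_per_group, height, width)
    x = x.transpose(1, 2).contiguous()
    return x.view(batch, channels, height, width)

x = torch.arange(8.0).view(1, 8, 1, 1)
print(channel_shuffle(x, groups=2).flatten().tolist())
# [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0] -- the two halves are interleaved,
# so after the next `torch.chunk` both branches see channels that came from
# both previous branches.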
12,431
30.553299
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sparsenet.py
""" SparseNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. """ __all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264'] import os import math import torch import torch.nn as nn import torch.nn.init as init from .common import pre_conv1x1_block, pre_conv3x3_block from .preresnet import PreResInitBlock, PreResActivation from .densenet import TransitionBlock def sparsenet_exponential_fetch(lst): """ SparseNet's specific exponential fetch. Parameters: ---------- lst : list List of something. Returns: ------- list Filtered list. """ return [lst[len(lst) - 2**i] for i in range(1 + math.floor(math.log(len(lst), 2)))] class SparseBlock(nn.Module): """ SparseNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(SparseBlock, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 mid_channels = out_channels * bn_size self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=out_channels) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) return x class SparseStage(nn.Module): """ SparseNet stage. Parameters: ---------- in_channels : int Number of input channels. channels_per_stage : list of int Number of output channels for each unit in stage. growth_rate : int Growth rate for blocks. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. do_transition : bool Whether use transition block. """ def __init__(self, in_channels, channels_per_stage, growth_rate, dropout_rate, do_transition): super(SparseStage, self).__init__() self.do_transition = do_transition if self.do_transition: self.trans = TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2)) in_channels = in_channels // 2 self.blocks = nn.Sequential() for i, out_channels in enumerate(channels_per_stage): self.blocks.add_module("block{}".format(i + 1), SparseBlock( in_channels=in_channels, out_channels=growth_rate, dropout_rate=dropout_rate)) in_channels = out_channels def forward(self, x): if self.do_transition: x = self.trans(x) outs = [x] for block in self.blocks._modules.values(): y = block(x) outs.append(y) flt_outs = sparsenet_exponential_fetch(outs) x = torch.cat(tuple(flt_outs), dim=1) return x class SparseNet(nn.Module): """ SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. growth_rate : int Growth rate for blocks. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, growth_rate, dropout_rate=0.0, in_channels=3, in_size=(224, 224), num_classes=1000): super(SparseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = SparseStage( in_channels=in_channels, channels_per_stage=channels_per_stage, growth_rate=growth_rate, dropout_rate=dropout_rate, do_transition=(i != 0)) in_channels = channels_per_stage[-1] self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sparsenet(num_layers, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SparseNet model with specific parameters. Parameters: ---------- num_layers : int Number of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if num_layers == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif num_layers == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif num_layers == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif num_layers == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] elif num_layers == 264: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 64, 48] else: raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))], zip([growth_rate] * yi, range(yi)), [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = SparseNet( channels=channels, init_block_channels=init_block_channels, growth_rate=growth_rate, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sparsenet121(**kwargs): """ SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs) def sparsenet161(**kwargs): """ SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs) def sparsenet169(**kwargs): """ SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs) def sparsenet201(**kwargs): """ SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs) def sparsenet264(**kwargs): """ SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ sparsenet121, sparsenet161, sparsenet169, sparsenet201, sparsenet264, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sparsenet121 or weight_count == 3250824) assert (model != sparsenet161 or weight_count == 9853288) assert (model != sparsenet169 or weight_count == 4709864) assert (model != sparsenet201 or weight_count == 5703144) assert (model != sparsenet264 or weight_count == 7717224) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
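
# `sparsenet_exponential_fetch` above keeps only O(log n) of the accumulated
# outputs: the i-th kept element is lst[len(lst) - 2**i]. The same index set,
# written without the float log/floor arithmetic (illustrative only):

def exponential_fetch_indices(n):
    indices = []
    step = 1
    while step <= n:
        indices.append(n - step)  # n - 1, n - 2, n - 4, ...
        step *= 2
    return indices

for n in range(1, 9):
    print(n, exponential_fetch_indices(n))
# n=8 -> [7, 6, 4, 0]: each block aggregates its immediate predecessor plus
# exponentially older feature maps -- the "sparse aggregation" that keeps
# channel growth logarithmic instead of linear (cf. DenseNet).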
11,646
29.569554
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/menet.py
""" MENet for ImageNet-1K, implemented in PyTorch. Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. """ __all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4', 'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle class MEUnit(nn.Module): """ MENet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. side_channels : int Number of side channels. groups : int Number of groups in convolution layers. downsample : bool Whether do downsample. ignore_group : bool Whether ignore group value in the first convolution layer. """ def __init__(self, in_channels, out_channels, side_channels, groups, downsample, ignore_group): super(MEUnit, self).__init__() self.downsample = downsample mid_channels = out_channels // 4 if downsample: out_channels -= in_channels # residual branch self.compress_conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=(1 if ignore_group else groups)) self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels) self.c_shuffle = ChannelShuffle( channels=mid_channels, groups=groups) self.dw_conv2 = depthwise_conv3x3( channels=mid_channels, stride=(2 if self.downsample else 1)) self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels) self.expand_conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, groups=groups) self.expand_bn3 = nn.BatchNorm2d(num_features=out_channels) if downsample: self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) self.activ = nn.ReLU(inplace=True) # fusion branch self.s_merge_conv = conv1x1( in_channels=mid_channels, out_channels=side_channels) self.s_merge_bn = nn.BatchNorm2d(num_features=side_channels) self.s_conv = conv3x3( in_channels=side_channels, out_channels=side_channels, stride=(2 if self.downsample else 1)) self.s_conv_bn = nn.BatchNorm2d(num_features=side_channels) self.s_evolve_conv = conv1x1( in_channels=side_channels, out_channels=mid_channels) self.s_evolve_bn = nn.BatchNorm2d(num_features=mid_channels) def forward(self, x): identity = x # pointwise group convolution 1 x = self.compress_conv1(x) x = self.compress_bn1(x) x = self.activ(x) x = self.c_shuffle(x) # merging y = self.s_merge_conv(x) y = self.s_merge_bn(y) y = self.activ(y) # depthwise convolution (bottleneck) x = self.dw_conv2(x) x = self.dw_bn2(x) # evolution y = self.s_conv(y) y = self.s_conv_bn(y) y = self.activ(y) y = self.s_evolve_conv(y) y = self.s_evolve_bn(y) y = torch.sigmoid(y) x = x * y # pointwise group convolution 2 x = self.expand_conv3(x) x = self.expand_bn3(x) # identity branch if self.downsample: identity = self.avgpool(identity) x = torch.cat((x, identity), dim=1) else: x = x + identity x = self.activ(x) return x class MEInitBlock(nn.Module): """ MENet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(MEInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class MENet(nn.Module): """ MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. side_channels : int Number of side channels in a ME-unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, side_channels, groups, in_channels=3, in_size=(224, 224), num_classes=1000): super(MENet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", MEInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): downsample = (j == 0) ignore_group = (i == 0) and (j == 0) stage.add_module("unit{}".format(j + 1), MEUnit( in_channels=in_channels, out_channels=out_channels, side_channels=side_channels, groups=groups, downsample=downsample, ignore_group=ignore_group)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_menet(first_stage_channels, side_channels, groups, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MENet model with specific parameters. Parameters: ---------- first_stage_channels : int Number of output channels at the first stage. side_channels : int Number of side channels in a ME-unit. groups : int Number of groups in convolution layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" layers = [4, 8, 4] if first_stage_channels == 108: init_block_channels = 12 channels_per_layers = [108, 216, 432] elif first_stage_channels == 128: init_block_channels = 12 channels_per_layers = [128, 256, 512] elif first_stage_channels == 160: init_block_channels = 16 channels_per_layers = [160, 320, 640] elif first_stage_channels == 228: init_block_channels = 24 channels_per_layers = [228, 456, 912] elif first_stage_channels == 256: init_block_channels = 24 channels_per_layers = [256, 512, 1024] elif first_stage_channels == 348: init_block_channels = 24 channels_per_layers = [348, 696, 1392] elif first_stage_channels == 352: init_block_channels = 24 channels_per_layers = [352, 704, 1408] elif first_stage_channels == 456: init_block_channels = 48 channels_per_layers = [456, 912, 1824] else: raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = MENet( channels=channels, init_block_channels=init_block_channels, side_channels=side_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def menet108_8x1_g3(**kwargs): """ 108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs) def menet128_8x1_g4(**kwargs): """ 128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs) def menet160_8x1_g8(**kwargs): """ 160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs) def menet228_12x1_g3(**kwargs): """ 228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs) def menet256_12x1_g4(**kwargs): """ 256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs) def menet348_12x1_g3(**kwargs): """ 348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs) def menet352_12x1_g8(**kwargs): """ 352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs) def menet456_24x1_g3(**kwargs): """ 456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,' https://arxiv.org/abs/1803.09127. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ menet108_8x1_g3, menet128_8x1_g4, menet160_8x1_g8, menet228_12x1_g3, menet256_12x1_g4, menet348_12x1_g3, menet352_12x1_g8, menet456_24x1_g3, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != menet108_8x1_g3 or weight_count == 654516) assert (model != menet128_8x1_g4 or weight_count == 750796) assert (model != menet160_8x1_g8 or weight_count == 850120) assert (model != menet228_12x1_g3 or weight_count == 1806568) assert (model != menet256_12x1_g4 or weight_count == 1888240) assert (model != menet348_12x1_g3 or weight_count == 3368128) assert (model != menet352_12x1_g8 or weight_count == 2272872) assert (model != menet456_24x1_g3 or weight_count == 5304784) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
15,917
31.956522
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/voca.py
""" VOCA for speech-driven facial animation, implemented in PyTorch. Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079. """ __all__ = ['VOCA', 'voca8flame'] import os import torch import torch.nn as nn import torch.nn.functional as F from .common import ConvBlock class VocaEncoder(nn.Module): """ VOCA encoder. Parameters: ---------- audio_features : int Number of audio features (characters/sounds). audio_window_size : int Size of audio window (for time related audio features). base_persons : int Number of base persons (subjects). encoder_features : int Number of encoder features. """ def __init__(self, audio_features, audio_window_size, base_persons, encoder_features): super(VocaEncoder, self).__init__() self.audio_window_size = audio_window_size channels = (32, 32, 64, 64) fc1_channels = 128 self.bn = nn.BatchNorm2d(num_features=1) in_channels = audio_features + base_persons self.branch = nn.Sequential() for i, out_channels in enumerate(channels): self.branch.add_module("conv{}".format(i + 1), ConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0), bias=True, use_bn=False)) in_channels = out_channels in_channels += base_persons self.fc1 = nn.Linear( in_features=in_channels, out_features=fc1_channels) self.fc2 = nn.Linear( in_features=fc1_channels, out_features=encoder_features) def forward(self, x, pid): x = self.bn(x) x = x.transpose(1, 3).contiguous() y = pid.unsqueeze(-1).unsqueeze(-1) y = y.repeat(1, 1, self.audio_window_size, 1) x = torch.cat((x, y), dim=1) x = self.branch(x) x = x.view(x.size(0), -1) x = torch.cat((x, pid), dim=1) x = self.fc1(x) x = x.tanh() x = self.fc2(x) return x class VOCA(nn.Module): """ VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079. Parameters: ---------- audio_features : int, default 29 Number of audio features (characters/sounds). audio_window_size : int, default 16 Size of audio window (for time related audio features). base_persons : int, default 8 Number of base persons (subjects). encoder_features : int, default 50 Number of encoder features. vertices : int, default 5023 Number of 3D geometry vertices. """ def __init__(self, audio_features=29, audio_window_size=16, base_persons=8, encoder_features=50, vertices=5023): super(VOCA, self).__init__() self.base_persons = base_persons self.encoder = VocaEncoder( audio_features=audio_features, audio_window_size=audio_window_size, base_persons=base_persons, encoder_features=encoder_features) self.decoder = nn.Linear( in_features=encoder_features, out_features=(3 * vertices)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x, pid): pid = F.one_hot(pid.long(), num_classes=self.base_persons).type_as(pid) x = self.encoder(x, pid) x = self.decoder(x) x = x.view(x.size(0), 1, -1, 3) return x def get_voca(base_persons, vertices, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create VOCA model with specific parameters. Parameters: ---------- base_persons : int Number of base persons (subjects). vertices : int Number of 3D geometry vertices. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = VOCA( base_persons=base_persons, vertices=vertices, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def voca8flame(**kwargs): """ VOCA-8-FLAME model for 8 base persons and FLAME topology from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_voca(base_persons=8, vertices=5023, model_name="voca8flame", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ voca8flame, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != voca8flame or weight_count == 809563) batch = 14 audio_features = 29 audio_window_size = 16 vertices = 5023 x = torch.randn(batch, 1, audio_window_size, audio_features) pid = torch.full(size=(batch,), fill_value=3, dtype=torch.float32) y = net(x, pid) # y.sum().backward() assert (y.shape == (batch, 1, vertices, 3)) if __name__ == "__main__": _test()
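# Illustrative sketch (added for clarity; `_pid_one_hot_demo` is a
# hypothetical helper, not part of the library API). It shows the identity
# conditioning used by `VOCA.forward` above: subject indices become one-hot
# vectors in the dtype of `pid`, which the encoder tiles over the audio
# window and concatenates with the audio features.
def _pid_one_hot_demo():
    pid = torch.full(size=(2,), fill_value=3, dtype=torch.float32)
    one_hot = F.one_hot(pid.long(), num_classes=8).type_as(pid)
    # Subject 3 of 8 -> float32 vectors with a single 1.0 at position 3.
    assert tuple(one_hot.size()) == (2, 8)
    assert one_hot[0, 3] == 1.0 and one_hot.sum() == 2.0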
6,683
28.575221
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/shakeshakeresnet_cifar.py
""" Shake-Shake-ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. """ __all__ = ['CIFARShakeShakeResNet', 'shakeshakeresnet20_2x16d_cifar10', 'shakeshakeresnet20_2x16d_cifar100', 'shakeshakeresnet20_2x16d_svhn', 'shakeshakeresnet26_2x32d_cifar10', 'shakeshakeresnet26_2x32d_cifar100', 'shakeshakeresnet26_2x32d_svhn'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv3x3_block from .resnet import ResBlock, ResBottleneck class ShakeShake(torch.autograd.Function): """ Shake-Shake function. """ @staticmethod def forward(ctx, x1, x2, alpha): y = alpha * x1 + (1 - alpha) * x2 return y @staticmethod def backward(ctx, dy): beta = torch.rand(dy.size(0), dtype=dy.dtype, device=dy.device).view(-1, 1, 1, 1) return beta * dy, (1 - beta) * dy, None class ShakeShakeShortcut(nn.Module): """ Shake-Shake-ResNet shortcut. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(ShakeShakeShortcut, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 self.pool = nn.AvgPool2d( kernel_size=1, stride=stride) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv1x1( in_channels=in_channels, out_channels=mid_channels) self.bn = nn.BatchNorm2d(num_features=out_channels) self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0)) def forward(self, x): x1 = self.pool(x) x1 = self.conv1(x1) x2 = x[:, :, :-1, :-1].contiguous() x2 = self.pad(x2) x2 = self.pool(x2) x2 = self.conv2(x2) x = torch.cat((x1, x2), dim=1) x = self.bn(x) return x class ShakeShakeResUnit(nn.Module): """ Shake-Shake-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. """ def __init__(self, in_channels, out_channels, stride, bottleneck): super(ShakeShakeResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) branch_class = ResBottleneck if bottleneck else ResBlock self.branch1 = branch_class( in_channels=in_channels, out_channels=out_channels, stride=stride) self.branch2 = branch_class( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_branch = ShakeShakeShortcut( in_channels=in_channels, out_channels=out_channels, stride=stride) self.activ = nn.ReLU(inplace=True) self.shake_shake = ShakeShake.apply def forward(self, x): if self.resize_identity: identity = self.identity_branch(x) else: identity = x x1 = self.branch1(x) x2 = self.branch2(x) if self.training: alpha = torch.rand(x1.size(0), dtype=x1.dtype, device=x1.device).view(-1, 1, 1, 1) x = self.shake_shake(x1, x2, alpha) else: x = 0.5 * (x1 + x2) x = x + identity x = self.activ(x) return x class CIFARShakeShakeResNet(nn.Module): """ Shake-Shake-ResNet model for CIFAR from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. 
in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARShakeShakeResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ShakeShakeResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_shakeshakeresnet_cifar(classes, blocks, bottleneck, first_stage_channels=16, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create Shake-Shake-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. first_stage_channels : int, default 16 Number of output channels for the first stage. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 from functools import reduce channels_per_layers = reduce(lambda x, y: x + [x[-1] * 2], range(2), [first_stage_channels]) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARShakeShakeResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def shakeshakeresnet20_2x16d_cifar10(classes=10, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_cifar10", **kwargs) def shakeshakeresnet20_2x16d_cifar100(classes=100, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_cifar100", **kwargs) def shakeshakeresnet20_2x16d_svhn(classes=10, **kwargs): """ Shake-Shake-ResNet-20-2x16d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16, model_name="shakeshakeresnet20_2x16d_svhn", **kwargs) def shakeshakeresnet26_2x32d_cifar10(classes=10, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_cifar10", **kwargs) def shakeshakeresnet26_2x32d_cifar100(classes=100, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_cifar100", **kwargs) def shakeshakeresnet26_2x32d_svhn(classes=10, **kwargs): """ Shake-Shake-ResNet-26-2x32d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32, model_name="shakeshakeresnet26_2x32d_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (shakeshakeresnet20_2x16d_cifar10, 10), (shakeshakeresnet20_2x16d_cifar100, 100), (shakeshakeresnet20_2x16d_svhn, 10), (shakeshakeresnet26_2x32d_cifar10, 10), (shakeshakeresnet26_2x32d_cifar100, 100), (shakeshakeresnet26_2x32d_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != shakeshakeresnet20_2x16d_cifar10 or weight_count == 541082) assert (model != shakeshakeresnet20_2x16d_cifar100 or weight_count == 546932) assert (model != shakeshakeresnet20_2x16d_svhn or weight_count == 541082) assert (model != shakeshakeresnet26_2x32d_cifar10 or weight_count == 2923162) assert (model != shakeshakeresnet26_2x32d_cifar100 or weight_count == 2934772) assert (model != shakeshakeresnet26_2x32d_svhn or weight_count == 2923162) x = torch.randn(14, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, num_classes)) if __name__ == "__main__": _test()
14,392
33.269048
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sqnet.py
""" SQNet for image segmentation, implemented in PyTorch. Original paper: 'Speeding up Semantic Segmentation for Autonomous Driving,' https://openreview.net/pdf?id=S1uHiFyyg. """ __all__ = ['SQNet', 'sqnet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, deconv3x3_block, Concurrent, Hourglass class FireBlock(nn.Module): """ SQNet specific encoder block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, bias, use_bn, activation): super(FireBlock, self).__init__() squeeze_channels = out_channels // 8 expand_channels = out_channels // 2 self.conv = conv1x1_block( in_channels=in_channels, out_channels=squeeze_channels, bias=bias, use_bn=use_bn, activation=activation) self.branches = Concurrent(merge_type="cat") self.branches.add_module("branch1", conv1x1_block( in_channels=squeeze_channels, out_channels=expand_channels, bias=bias, use_bn=use_bn, activation=None)) self.branches.add_module("branch2", conv3x3_block( in_channels=squeeze_channels, out_channels=expand_channels, bias=bias, use_bn=use_bn, activation=None)) self.activ = nn.ELU(inplace=True) def forward(self, x): x = self.conv(x) x = self.branches(x) x = self.activ(x) return x class ParallelDilatedConv(nn.Module): """ SQNet specific decoder block (parallel dilated convolution). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, bias, use_bn, activation): super(ParallelDilatedConv, self).__init__() dilations = [1, 2, 3, 4] self.branches = Concurrent(merge_type="sum") for i, dilation in enumerate(dilations): self.branches.add_module("branch{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, padding=dilation, dilation=dilation, bias=bias, use_bn=use_bn, activation=activation)) def forward(self, x): x = self.branches(x) return x class SQNetUpStage(nn.Module): """ SQNet upscale stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bias : bool Whether the layer uses a bias vector. use_bn : bool Whether to use BatchNorm layer. activation : function or str or None Activation function or name of activation function. use_parallel_conv : bool Whether to use parallel dilated convolution. 
""" def __init__(self, in_channels, out_channels, bias, use_bn, activation, use_parallel_conv): super(SQNetUpStage, self).__init__() if use_parallel_conv: self.conv = ParallelDilatedConv( in_channels=in_channels, out_channels=in_channels, bias=bias, use_bn=use_bn, activation=activation) else: self.conv = conv3x3_block( in_channels=in_channels, out_channels=in_channels, bias=bias, use_bn=use_bn, activation=activation) self.deconv = deconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bias=bias, use_bn=use_bn, activation=activation) def forward(self, x): x = self.conv(x) x = self.deconv(x) return x class SQNet(nn.Module): """ SQNet model from 'Speeding up Semantic Segmentation for Autonomous Driving,' https://openreview.net/pdf?id=S1uHiFyyg. Parameters: ---------- channels : list of list of int Number of output channels for each stage in encoder and decoder. init_block_channels : int Number of output channels for the initial unit. layers : list of int Number of layers for each stage in encoder. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, channels, init_block_channels, layers, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(SQNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = True use_bn = False activation = (lambda: nn.ELU(inplace=True)) self.stem = conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, bias=bias, use_bn=use_bn, activation=activation) in_channels = init_block_channels down_seq = nn.Sequential() skip_seq = nn.Sequential() for i, out_channels in enumerate(channels[0]): skip_seq.add_module("skip{}".format(i + 1), conv3x3_block( in_channels=in_channels, out_channels=in_channels, bias=bias, use_bn=use_bn, activation=activation)) stage = nn.Sequential() stage.add_module("unit1", nn.MaxPool2d( kernel_size=2, stride=2)) for j in range(layers[i]): stage.add_module("unit{}".format(j + 2), FireBlock( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=activation)) in_channels = out_channels down_seq.add_module("down{}".format(i + 1), stage) in_channels = in_channels // 2 up_seq = nn.Sequential() for i, out_channels in enumerate(channels[1]): use_parallel_conv = True if i == 0 else False up_seq.add_module("up{}".format(i + 1), SQNetUpStage( in_channels=(2 * in_channels), out_channels=out_channels, bias=bias, use_bn=use_bn, activation=activation, use_parallel_conv=use_parallel_conv)) in_channels = out_channels up_seq = up_seq[::-1] self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq, merge_type="cat") self.head = SQNetUpStage( in_channels=(2 * in_channels), out_channels=num_classes, bias=bias, use_bn=use_bn, activation=activation, use_parallel_conv=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.stem(x) x = self.hg(x) x = 
self.head(x) return x def get_sqnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SQNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[128, 256, 512], [256, 128, 96]] init_block_channels = 96 layers = [2, 2, 3] net = SQNet( channels=channels, init_block_channels=init_block_channels, layers=layers, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sqnet_cityscapes(num_classes=19, **kwargs): """ SQNet model for Cityscapes from 'Speeding up Semantic Segmentation for Autonomous Driving,' https://openreview.net/pdf?id=S1uHiFyyg. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sqnet(num_classes=num_classes, model_name="sqnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ sqnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sqnet_cityscapes or weight_count == 16262771) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
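# Illustrative sketch (added for clarity; `_parallel_dilated_demo` is a
# hypothetical helper, not part of the library API). `ParallelDilatedConv`
# above preserves the spatial size because every 3x3 branch uses padding
# equal to its dilation, which lets the four branches be summed elementwise.
def _parallel_dilated_demo():
    block = ParallelDilatedConv(
        in_channels=16,
        out_channels=16,
        bias=True,
        use_bn=False,
        activation=(lambda: nn.ELU(inplace=True)))
    x = torch.randn(1, 16, 64, 64)
    y = block(x)
    assert tuple(y.size()) == (1, 16, 64, 64)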
11,602
29.374346
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/wrn_cifar.py
""" WRN for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. """ __all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10', 'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3 from .preresnet import PreResUnit, PreResActivation class CIFARWRN(nn.Module): """ WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARWRN, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=False, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_wrn_cifar(num_classes, blocks, width_factor, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create WRN model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. width_factor : int Wide scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert ((blocks - 4) % 6 == 0) layers = [(blocks - 4) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)] net = CIFARWRN( channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def wrn16_10_cifar10(num_classes=10, **kwargs): """ WRN-16-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar10", **kwargs) def wrn16_10_cifar100(num_classes=100, **kwargs): """ WRN-16-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar100", **kwargs) def wrn16_10_svhn(num_classes=10, **kwargs): """ WRN-16-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=16, width_factor=10, model_name="wrn16_10_svhn", **kwargs) def wrn28_10_cifar10(num_classes=10, **kwargs): """ WRN-28-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar10", **kwargs) def wrn28_10_cifar100(num_classes=100, **kwargs): """ WRN-28-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar100", **kwargs) def wrn28_10_svhn(num_classes=10, **kwargs): """ WRN-28-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=28, width_factor=10, model_name="wrn28_10_svhn", **kwargs) def wrn40_8_cifar10(num_classes=10, **kwargs): """ WRN-40-8 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar10", **kwargs) def wrn40_8_cifar100(num_classes=100, **kwargs): """ WRN-40-8 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar100", **kwargs) def wrn40_8_svhn(num_classes=10, **kwargs): """ WRN-40-8 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn_cifar(num_classes=num_classes, blocks=40, width_factor=8, model_name="wrn40_8_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (wrn16_10_cifar10, 10), (wrn16_10_cifar100, 100), (wrn16_10_svhn, 10), (wrn28_10_cifar10, 10), (wrn28_10_cifar100, 100), (wrn28_10_svhn, 10), (wrn40_8_cifar10, 10), (wrn40_8_cifar100, 100), (wrn40_8_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn16_10_cifar10 or weight_count == 17116634) assert (model != wrn16_10_cifar100 or weight_count == 17174324) assert (model != wrn16_10_svhn or weight_count == 17116634) assert (model != wrn28_10_cifar10 or weight_count == 36479194) assert (model != wrn28_10_cifar100 or weight_count == 36536884) assert (model != wrn28_10_svhn or weight_count == 36479194) assert (model != wrn40_8_cifar10 or weight_count == 35748314) assert (model != wrn40_8_cifar100 or weight_count == 35794484) assert (model != wrn40_8_svhn or weight_count == 35748314) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
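# Illustrative sketch (added for clarity; `_wrn_channels_demo` is a
# hypothetical helper, not part of the library API). It works through the
# channel schedule computed in `get_wrn_cifar` above: depth 28 gives
# (28 - 4) // 6 = 4 units per stage, and the base widths [16, 32, 64] are
# multiplied by the width factor 10.
def _wrn_channels_demo():
    blocks, width_factor = 28, 10
    layers = [(blocks - 4) // 6] * 3
    channels = [[ci * width_factor] * li
                for (ci, li) in zip([16, 32, 64], layers)]
    assert channels == [[160] * 4, [320] * 4, [640] * 4]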
11,329
33.126506
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/inceptionresnetv2.py
""" InceptionResNetV2 for ImageNet-1K, implemented in PyTorch. Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. """ __all__ = ['InceptionResNetV2', 'inceptionresnetv2'] import os import torch.nn as nn from .common import conv1x1_block, conv3x3_block, Concurrent from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit class InceptBlock5b(nn.Module): """ InceptionResNetV2 type Mixed-5b block. Parameters: ---------- bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, bn_eps): super(InceptBlock5b, self).__init__() in_channels = 192 self.branches = Concurrent() self.branches.add_module("branch1", Conv1x1Branch( in_channels=in_channels, out_channels=96, bn_eps=bn_eps)) self.branches.add_module("branch2", ConvSeqBranch( in_channels=in_channels, out_channels_list=(48, 64), kernel_size_list=(1, 5), strides_list=(1, 1), padding_list=(0, 2), bn_eps=bn_eps)) self.branches.add_module("branch3", ConvSeqBranch( in_channels=in_channels, out_channels_list=(64, 96, 96), kernel_size_list=(1, 3, 3), strides_list=(1, 1, 1), padding_list=(0, 1, 1), bn_eps=bn_eps)) self.branches.add_module("branch4", AvgPoolBranch( in_channels=in_channels, out_channels=64, bn_eps=bn_eps, count_include_pad=False)) def forward(self, x): x = self.branches(x) return x class InceptInitBlock(nn.Module): """ InceptionResNetV2 specific initial block. Parameters: ---------- in_channels : int Number of input channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, bn_eps): super(InceptInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=32, stride=2, padding=0, bn_eps=bn_eps) self.conv2 = conv3x3_block( in_channels=32, out_channels=32, stride=1, padding=0, bn_eps=bn_eps) self.conv3 = conv3x3_block( in_channels=32, out_channels=64, stride=1, padding=1, bn_eps=bn_eps) self.pool1 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) self.conv4 = conv1x1_block( in_channels=64, out_channels=80, stride=1, padding=0, bn_eps=bn_eps) self.conv5 = conv3x3_block( in_channels=80, out_channels=192, stride=1, padding=0, bn_eps=bn_eps) self.pool2 = nn.MaxPool2d( kernel_size=3, stride=2, padding=0) self.block = InceptBlock5b(bn_eps=bn_eps) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool1(x) x = self.conv4(x) x = self.conv5(x) x = self.pool2(x) x = self.block(x) return x class InceptionResNetV2(nn.Module): """ InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- dropout_rate : float, default 0.0 Fraction of the input units to drop. Must be a number between 0 and 1. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (299, 299) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, dropout_rate=0.0, bn_eps=1e-5, in_channels=3, in_size=(299, 299), num_classes=1000): super(InceptionResNetV2, self).__init__() self.in_size = in_size self.num_classes = num_classes layers = [10, 21, 11] in_channels_list = [320, 1088, 2080] normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]] reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]] normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit] reduction_units = [ReductionAUnit, ReductionBUnit] self.features = nn.Sequential() self.features.add_module("init_block", InceptInitBlock( in_channels=in_channels, bn_eps=bn_eps)) in_channels = in_channels_list[0] for i, layers_per_stage in enumerate(layers): stage = nn.Sequential() for j in range(layers_per_stage): if (j == 0) and (i != 0): unit = reduction_units[i - 1] out_channels_list_per_stage = reduction_out_channels_list[i - 1] else: unit = normal_units[i] out_channels_list_per_stage = normal_out_channels_list[i] if (i == len(layers) - 1) and (j == layers_per_stage - 1): unit_kwargs = {"scale": 1.0, "activate": False} else: unit_kwargs = {} stage.add_module("unit{}".format(j + 1), unit( in_channels=in_channels, out_channels_list=out_channels_list_per_stage, bn_eps=bn_eps, **unit_kwargs)) if (j == 0) and (i != 0): in_channels = in_channels_list[i] self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_conv", conv1x1_block( in_channels=in_channels, out_channels=1536, bn_eps=bn_eps)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Sequential() if dropout_rate > 0.0: self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=1536, out_features=num_classes)) self._init_params() def _init_params(self): for module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_inceptionresnetv2(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create InceptionResNetV2 model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = InceptionResNetV2(**kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def inceptionresnetv2(**kwargs): """ InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_eps=1e-3, **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ inceptionresnetv2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != inceptionresnetv2 or weight_count == 55843464) x = torch.randn(1, 3, 299, 299) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,577
30.926667
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ghostnet.py
""" GhostNet for ImageNet-1K, implemented in PyTorch. Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. """ __all__ = ['GhostNet', 'ghostnet'] import os import math import torch import torch.nn as nn from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\ dwsconv3x3_block, SEBlock class GhostHSigmoid(nn.Module): """ Approximated sigmoid function, specific for GhostNet. """ def forward(self, x): return torch.clamp(x, min=0.0, max=1.0) class GhostConvBlock(nn.Module): """ GhostNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, activation=(lambda: nn.ReLU(inplace=True))): super(GhostConvBlock, self).__init__() main_out_channels = math.ceil(0.5 * out_channels) cheap_out_channels = out_channels - main_out_channels self.main_conv = conv1x1_block( in_channels=in_channels, out_channels=main_out_channels, activation=activation) self.cheap_conv = dwconv3x3_block( in_channels=main_out_channels, out_channels=cheap_out_channels, activation=activation) def forward(self, x): x = self.main_conv(x) y = self.cheap_conv(x) return torch.cat((x, y), dim=1) class GhostExpBlock(nn.Module): """ GhostNet expansion block for residual path in GhostNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : float Expansion factor. use_se : bool Whether to use SE-module. """ def __init__(self, in_channels, out_channels, stride, use_kernel3, exp_factor, use_se): super(GhostExpBlock, self).__init__() self.use_dw_conv = (stride != 1) self.use_se = use_se mid_channels = int(math.ceil(exp_factor * in_channels)) self.exp_conv = GhostConvBlock( in_channels=in_channels, out_channels=mid_channels) if self.use_dw_conv: dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block self.dw_conv = dw_conv_class( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=None) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=4, out_activation=GhostHSigmoid()) self.pw_conv = GhostConvBlock( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.exp_conv(x) if self.use_dw_conv: x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) return x class GhostUnit(nn.Module): """ GhostNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : float Expansion factor. use_se : bool Whether to use SE-module. 
""" def __init__(self, in_channels, out_channels, stride, use_kernel3, exp_factor, use_se): super(GhostUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = GhostExpBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, use_se=use_se) if self.resize_identity: self.identity_conv = dwsconv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, pw_activation=None) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity return x class GhostClassifier(nn.Module): """ GhostNet classifier. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. """ def __init__(self, in_channels, out_channels, mid_channels): super(GhostClassifier, self).__init__() self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class GhostNet(nn.Module): """ GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. classifier_mid_channels : int Number of middle channels for classifier. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. use_se : list of list of int/bool Using SE-block flag for each unit. first_stride : bool Whether to use stride for the first stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, exp_factors, use_se, first_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(GhostNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] use_se_flag = use_se[i][j] == 1 stage.add_module("unit{}".format(j + 1), GhostUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, use_se=use_se_flag)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = GhostClassifier( in_channels=in_channels, out_channels=num_classes, mid_channels=classifier_mid_channels) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_ghostnet(width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create GhostNet model with specific parameters. Parameters: ---------- width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" init_block_channels = 16 channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]] kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]] exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]] use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]] final_block_channels = 960 classifier_mid_channels = 1280 first_stride = False if width_scale != 1.0: channels = [[round_channels(cij * width_scale, divisor=4) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale, divisor=4) if width_scale > 1.0: final_block_channels = round_channels(final_block_channels * width_scale, divisor=4) net = GhostNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, classifier_mid_channels=classifier_mid_channels, kernels3=kernels3, exp_factors=exp_factors, use_se=use_se, first_stride=first_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ghostnet(**kwargs): """ GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ghostnet(model_name="ghostnet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ ghostnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ghostnet or weight_count == 5180840) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,819
30.268293
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/efficientnet.py
""" EfficientNet for ImageNet-1K, implemented in PyTorch. Original papers: - 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946, - 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665. """ __all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b', 'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b', 'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c', 'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c'] import os import math import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock def calc_tf_padding(x, kernel_size, stride=1, dilation=1): """ Calculate TF-same like padding size. Parameters: ---------- x : tensor Input tensor. kernel_size : int Convolution window size. stride : int, default 1 Strides of the convolution. dilation : int, default 1 Dilation value for convolution layer. Returns: ------- tuple of 4 int The size of the padding. """ height, width = x.size()[2:] oh = math.ceil(float(height) / stride) ow = math.ceil(float(width) / stride) pad_h = max((oh - 1) * stride + (kernel_size - 1) * dilation + 1 - height, 0) pad_w = max((ow - 1) * stride + (kernel_size - 1) * dilation + 1 - width, 0) return pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2 class EffiDwsConvUnit(nn.Module): """ EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. tf_mode : bool Whether to use TF-like mode. """ def __init__(self, in_channels, out_channels, stride, bn_eps, activation, tf_mode): super(EffiDwsConvUnit, self).__init__() self.tf_mode = tf_mode self.residual = (in_channels == out_channels) and (stride == 1) self.dw_conv = dwconv3x3_block( in_channels=in_channels, out_channels=in_channels, padding=(0 if tf_mode else 1), bn_eps=bn_eps, activation=activation) self.se = SEBlock( channels=in_channels, reduction=4, mid_activation=activation) self.pw_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def forward(self, x): if self.residual: identity = x if self.tf_mode: x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3)) x = self.dw_conv(x) x = self.se(x) x = self.pw_conv(x) if self.residual: x = x + identity return x class EffiInvResUnit(nn.Module): """ EfficientNet inverted residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the second convolution layer. exp_factor : int Factor for expansion of channels. se_factor : int SE reduction factor for each unit. 
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 exp_factor,
                 se_factor,
                 bn_eps,
                 activation,
                 tf_mode):
        super(EffiInvResUnit, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.tf_mode = tf_mode
        self.residual = (in_channels == out_channels) and (stride == 1)
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor
        if kernel_size == 3:
            dwconv_block_fn = dwconv3x3_block
        elif kernel_size == 5:
            dwconv_block_fn = dwconv5x5_block
        else:
            raise ValueError("Unsupported kernel size: {}".format(kernel_size))

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=activation)
        self.conv2 = dwconv_block_fn(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            padding=(0 if tf_mode else (kernel_size // 2)),
            bn_eps=bn_eps,
            activation=activation)
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                mid_activation=activation)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None)

    def forward(self, x):
        if self.residual:
            identity = x
        x = self.conv1(x)
        if self.tf_mode:
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=self.kernel_size, stride=self.stride))
        x = self.conv2(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x)
        if self.residual:
            x = x + identity
        return x


class EffiInitBlock(nn.Module):
    """
    EfficientNet specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 activation,
                 tf_mode):
        super(EffiInitBlock, self).__init__()
        self.tf_mode = tf_mode

        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            padding=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation)

    def forward(self, x):
        if self.tf_mode:
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3, stride=2))
        x = self.conv(x)
        return x


class EfficientNet(nn.Module):
    """
    EfficientNet model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Kernel sizes for each unit.
    strides_per_stage : list of int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
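
    Examples:
    --------
    A construction sketch with a truncated toy configuration (not one of the
    published B0..B8 variants); the adaptive final pooling makes the input
    size free:

    >>> import torch
    >>> net = EfficientNet(channels=[[16], [24, 24]], init_block_channels=32,
    ...                    final_block_channels=128, kernel_sizes=[[3], [3, 3]],
    ...                    strides_per_stage=[1, 2],
    ...                    expansion_factors=[[1], [6, 6]], num_classes=10)
    >>> y = net(torch.randn(1, 3, 64, 64))
    >>> tuple(y.size())
    (1, 10)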
""" def __init__(self, channels, init_block_channels, final_block_channels, kernel_sizes, strides_per_stage, expansion_factors, dropout_rate=0.2, tf_mode=False, bn_eps=1e-5, in_channels=3, in_size=(224, 224), num_classes=1000): super(EfficientNet, self).__init__() self.in_size = in_size self.num_classes = num_classes activation = "swish" self.features = nn.Sequential() self.features.add_module("init_block", EffiInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): kernel_sizes_per_stage = kernel_sizes[i] expansion_factors_per_stage = expansion_factors[i] stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): kernel_size = kernel_sizes_per_stage[j] expansion_factor = expansion_factors_per_stage[j] stride = strides_per_stage[i] if (j == 0) else 1 if i == 0: stage.add_module("unit{}".format(j + 1), EffiDwsConvUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) else: stage.add_module("unit{}".format(j + 1), EffiInvResUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, exp_factor=expansion_factor, se_factor=4, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, activation=activation)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Sequential() if dropout_rate > 0.0: self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_efficientnet(version, in_size, tf_mode=False, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create EfficientNet model with specific parameters. Parameters: ---------- version : str Version of EfficientNet ('b0'...'b8'). in_size : tuple of two ints Spatial size of the expected input image. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if version == "b0": assert (in_size == (224, 224)) depth_factor = 1.0 width_factor = 1.0 dropout_rate = 0.2 elif version == "b1": assert (in_size == (240, 240)) depth_factor = 1.1 width_factor = 1.0 dropout_rate = 0.2 elif version == "b2": assert (in_size == (260, 260)) depth_factor = 1.2 width_factor = 1.1 dropout_rate = 0.3 elif version == "b3": assert (in_size == (300, 300)) depth_factor = 1.4 width_factor = 1.2 dropout_rate = 0.3 elif version == "b4": assert (in_size == (380, 380)) depth_factor = 1.8 width_factor = 1.4 dropout_rate = 0.4 elif version == "b5": assert (in_size == (456, 456)) depth_factor = 2.2 width_factor = 1.6 dropout_rate = 0.4 elif version == "b6": assert (in_size == (528, 528)) depth_factor = 2.6 width_factor = 1.8 dropout_rate = 0.5 elif version == "b7": assert (in_size == (600, 600)) depth_factor = 3.1 width_factor = 2.0 dropout_rate = 0.5 elif version == "b8": assert (in_size == (672, 672)) depth_factor = 3.6 width_factor = 2.2 dropout_rate = 0.5 else: raise ValueError("Unsupported EfficientNet version {}".format(version)) init_block_channels = 32 layers = [1, 2, 2, 3, 3, 4, 1] downsample = [1, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 40, 80, 112, 192, 320] expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6] kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3] strides_per_stage = [1, 2, 2, 2, 1, 2, 1] final_block_channels = 1280 layers = [int(math.ceil(li * depth_factor)) for li in layers] channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), []) kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(kernel_sizes_per_layers, layers, downsample), []) expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(expansion_factors_per_layers, layers, downsample), []) strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(strides_per_stage, layers, downsample), []) strides_per_stage = [si[0] for si in strides_per_stage] init_block_channels = round_channels(init_block_channels * width_factor) if width_factor > 1.0: assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor)) final_block_channels = round_channels(final_block_channels * width_factor) net = EfficientNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernel_sizes=kernel_sizes, strides_per_stage=strides_per_stage, expansion_factors=expansion_factors, dropout_rate=dropout_rate, tf_mode=tf_mode, bn_eps=bn_eps, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def efficientnet_b0(in_size=(224, 224), **kwargs): """ EfficientNet-B0 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b0", in_size=in_size, model_name="efficientnet_b0", **kwargs) def efficientnet_b1(in_size=(240, 240), **kwargs): """ EfficientNet-B1 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, model_name="efficientnet_b1", **kwargs) def efficientnet_b2(in_size=(260, 260), **kwargs): """ EfficientNet-B2 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, model_name="efficientnet_b2", **kwargs) def efficientnet_b3(in_size=(300, 300), **kwargs): """ EfficientNet-B3 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, model_name="efficientnet_b3", **kwargs) def efficientnet_b4(in_size=(380, 380), **kwargs): """ EfficientNet-B4 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, model_name="efficientnet_b4", **kwargs) def efficientnet_b5(in_size=(456, 456), **kwargs): """ EfficientNet-B5 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, model_name="efficientnet_b5", **kwargs) def efficientnet_b6(in_size=(528, 528), **kwargs): """ EfficientNet-B6 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b6", in_size=in_size, model_name="efficientnet_b6", **kwargs) def efficientnet_b7(in_size=(600, 600), **kwargs): """ EfficientNet-B7 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, model_name="efficientnet_b7", **kwargs) def efficientnet_b8(in_size=(672, 672), **kwargs): """ EfficientNet-B8 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (672, 672) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b8", in_size=in_size, model_name="efficientnet_b8", **kwargs) def efficientnet_b0b(in_size=(224, 224), **kwargs): """ EfficientNet-B0-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b0b", **kwargs) def efficientnet_b1b(in_size=(240, 240), **kwargs): """ EfficientNet-B1-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b1b", **kwargs) def efficientnet_b2b(in_size=(260, 260), **kwargs): """ EfficientNet-B2-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b2b", **kwargs) def efficientnet_b3b(in_size=(300, 300), **kwargs): """ EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b3b", **kwargs) def efficientnet_b4b(in_size=(380, 380), **kwargs): """ EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b4b", **kwargs) def efficientnet_b5b(in_size=(456, 456), **kwargs): """ EfficientNet-B5-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b5b", **kwargs) def efficientnet_b6b(in_size=(528, 528), **kwargs): """ EfficientNet-B6-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b6b", **kwargs) def efficientnet_b7b(in_size=(600, 600), **kwargs): """ EfficientNet-B7-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b7b", **kwargs) def efficientnet_b0c(in_size=(224, 224), **kwargs): """ EfficientNet-B0-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b0c", **kwargs) def efficientnet_b1c(in_size=(240, 240), **kwargs): """ EfficientNet-B1-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b1c", **kwargs) def efficientnet_b2c(in_size=(260, 260), **kwargs): """ EfficientNet-B2-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (260, 260) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b2c", **kwargs) def efficientnet_b3c(in_size=(300, 300), **kwargs): """ EfficientNet-B3-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b3c", **kwargs) def efficientnet_b4c(in_size=(380, 380), **kwargs): """ EfficientNet-B4-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (380, 380) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b4c", **kwargs) def efficientnet_b5c(in_size=(456, 456), **kwargs): """ EfficientNet-B5-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (456, 456) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b5c", **kwargs) def efficientnet_b6c(in_size=(528, 528), **kwargs): """ EfficientNet-B6-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (528, 528) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b6c", **kwargs) def efficientnet_b7c(in_size=(600, 600), **kwargs): """ EfficientNet-B7-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (600, 600) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b7c", **kwargs) def efficientnet_b8c(in_size=(672, 672), **kwargs): """ EfficientNet-B8-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (672, 672) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_efficientnet(version="b8", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_b8c", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4, efficientnet_b5, efficientnet_b6, efficientnet_b7, efficientnet_b8, efficientnet_b0b, efficientnet_b1b, efficientnet_b2b, efficientnet_b3b, efficientnet_b4b, efficientnet_b5b, efficientnet_b6b, efficientnet_b7b, efficientnet_b0c, efficientnet_b1c, efficientnet_b2c, efficientnet_b3c, efficientnet_b4c, efficientnet_b5c, efficientnet_b6c, efficientnet_b7c, efficientnet_b8c, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != efficientnet_b0 or weight_count == 5288548) assert (model != efficientnet_b1 or weight_count == 7794184) assert (model != efficientnet_b2 or weight_count == 9109994) assert (model != efficientnet_b3 or weight_count == 12233232) assert (model != efficientnet_b4 or weight_count == 19341616) assert (model != efficientnet_b5 or weight_count == 30389784) assert (model != efficientnet_b6 or weight_count == 43040704) assert (model != efficientnet_b7 or weight_count == 66347960) assert (model != efficientnet_b8 or weight_count == 87413142) assert (model != efficientnet_b0b or weight_count == 5288548) assert (model != efficientnet_b1b or weight_count == 7794184) assert (model != efficientnet_b2b or weight_count == 9109994) assert (model != efficientnet_b3b or weight_count == 12233232) assert (model != efficientnet_b4b or weight_count == 19341616) assert (model != efficientnet_b5b or weight_count == 30389784) assert (model != efficientnet_b6b or weight_count == 43040704) assert (model != efficientnet_b7b or weight_count == 66347960) x = torch.randn(1, 3, net.in_size[0], net.in_size[1]) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
37,745
35.933464
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/edanet.py
""" EDANet for image segmentation, implemented in PyTorch. Original paper: 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1809.06323. """ __all__ = ['EDANet', 'edanet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, conv3x3, conv1x1_block, asym_conv3x3_block, NormActivation, InterpolationBlock class DownBlock(nn.Module): """ EDANet specific downsample block for the main branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(DownBlock, self).__init__() self.expand = (in_channels < out_channels) mid_channels = out_channels - in_channels if self.expand else out_channels self.conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, bias=True, stride=2) if self.expand: self.pool = nn.MaxPool2d( kernel_size=2, stride=2) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps) def forward(self, x): y = self.conv(x) if self.expand: z = self.pool(x) y = torch.cat((y, z), dim=1) y = self.norm_activ(y) return y class EDABlock(nn.Module): """ EDANet base block. Parameters: ---------- channels : int Number of input/output channels. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, channels, dilation, dropout_rate, bn_eps): super(EDABlock, self).__init__() self.use_dropout = (dropout_rate != 0.0) self.conv1 = asym_conv3x3_block( channels=channels, bias=True, lw_use_bn=False, bn_eps=bn_eps, lw_activation=None) self.conv2 = asym_conv3x3_block( channels=channels, padding=dilation, dilation=dilation, bias=True, lw_use_bn=False, bn_eps=bn_eps, rw_activation=None) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) return x class EDAUnit(nn.Module): """ EDANet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilation : int Dilation value for convolution layer. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, dilation, dropout_rate, bn_eps): super(EDAUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) mid_channels = out_channels - in_channels self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=True) self.conv2 = EDABlock( channels=mid_channels, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps) self.activ = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) x = torch.cat((x, identity), dim=1) x = self.activ(x) return x class EDANet(nn.Module): """ EDANet model from 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1809.06323. Parameters: ---------- channels : list of int Number of output channels for the first unit of each stage. dilations : list of list of int Dilations for blocks. growth_rate : int Growth rate for numbers of output channels for each non-first unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. 
aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, channels, dilations, growth_rate, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(EDANet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size dropout_rate = 0.02 self.features = nn.Sequential() for i, dilations_per_stage in enumerate(dilations): out_channels = channels[i] stage = nn.Sequential() for j, dilation in enumerate(dilations_per_stage): if j == 0: stage.add_module("unit{}".format(j + 1), DownBlock( in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps)) else: out_channels += growth_rate stage.add_module("unit{}".format(j + 1), EDAUnit( in_channels=in_channels, out_channels=out_channels, dilation=dilation, dropout_rate=dropout_rate, bn_eps=bn_eps)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.head = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=True) self.up = InterpolationBlock( scale_factor=8, align_corners=True) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.head(x) x = self.up(x) return x def get_edanet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create EDANet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [15, 60, 130, 450] dilations = [[0], [0, 1, 1, 1, 2, 2], [0, 2, 2, 4, 4, 8, 8, 16, 16]] growth_rate = 40 bn_eps = 1e-3 net = EDANet( channels=channels, dilations=dilations, growth_rate=growth_rate, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def edanet_cityscapes(num_classes=19, **kwargs): """ EDANet model for Cityscapes from 'Efficient Dense Modules of Asymmetric Convolution for Real-Time Semantic Segmentation,' https://arxiv.org/abs/1809.06323. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
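
    Examples:
    --------
    A usage sketch (the full Cityscapes resolution below is memory-hungry;
    any input whose spatial sides are divisible by 8 works for the forward
    pass):

    >>> import torch
    >>> net = edanet_cityscapes()
    >>> y = net(torch.randn(1, 3, 1024, 2048))
    >>> tuple(y.size())
    (1, 19, 1024, 2048)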
""" return get_edanet(num_classes=num_classes, model_name="edanet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ edanet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != edanet_cityscapes or weight_count == 689485) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
10,158
28.618076
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/channelnet.py
""" ChannelNet for ImageNet-1K, implemented in PyTorch. Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. """ __all__ = ['ChannelNet', 'channelnet'] import os import torch import torch.nn as nn import torch.nn.init as init def dwconv3x3(in_channels, out_channels, stride, bias=False): """ 3x3 depthwise version of the standard convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. """ return nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, groups=out_channels, bias=bias) class ChannetConv(nn.Module): """ ChannelNet specific convolution block with Batch normalization and ReLU6 activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, dropout_rate=0.0, activate=True): super(ChannetConv, self).__init__() self.use_dropout = (dropout_rate > 0.0) self.activate = activate self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) self.bn = nn.BatchNorm2d(num_features=out_channels) if self.activate: self.activ = nn.ReLU6(inplace=True) def forward(self, x): x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def channet_conv1x1(in_channels, out_channels, stride=1, groups=1, bias=False, dropout_rate=0.0, activate=True): """ 1x1 version of ChannelNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. """ return ChannetConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=groups, bias=bias, dropout_rate=dropout_rate, activate=activate) def channet_conv3x3(in_channels, out_channels, stride, padding=1, dilation=1, groups=1, bias=False, dropout_rate=0.0, activate=True): """ 3x3 version of the standard convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. 
dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. dropout_rate : float, default 0.0 Dropout rate. activate : bool, default True Whether activate the convolution block. """ return ChannetConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, dropout_rate=dropout_rate, activate=activate) class ChannetDwsConvBlock(nn.Module): """ ChannelNet specific depthwise separable convolution block with BatchNorms and activations at last convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. groups : int, default 1 Number of groups. dropout_rate : float, default 0.0 Dropout rate. """ def __init__(self, in_channels, out_channels, stride, groups=1, dropout_rate=0.0): super(ChannetDwsConvBlock, self).__init__() self.dw_conv = dwconv3x3( in_channels=in_channels, out_channels=in_channels, stride=stride) self.pw_conv = channet_conv1x1( in_channels=in_channels, out_channels=out_channels, groups=groups, dropout_rate=dropout_rate) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class SimpleGroupBlock(nn.Module): """ ChannelNet specific block with a sequence of depthwise separable group convolution layers. Parameters: ---------- channels : int Number of input/output channels. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. """ def __init__(self, channels, multi_blocks, groups, dropout_rate): super(SimpleGroupBlock, self).__init__() self.blocks = nn.Sequential() for i in range(multi_blocks): self.blocks.add_module("block{}".format(i + 1), ChannetDwsConvBlock( in_channels=channels, out_channels=channels, stride=1, groups=groups, dropout_rate=dropout_rate)) def forward(self, x): x = self.blocks(x) return x class ChannelwiseConv2d(nn.Module): """ ChannelNet specific block with channel-wise convolution. Parameters: ---------- groups : int Number of groups. dropout_rate : float Dropout rate. """ def __init__(self, groups, dropout_rate): super(ChannelwiseConv2d, self).__init__() self.use_dropout = (dropout_rate > 0.0) self.conv = nn.Conv3d( in_channels=1, out_channels=groups, kernel_size=(4 * groups, 1, 1), stride=(groups, 1, 1), padding=(2 * groups - 1, 0, 0), bias=False) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): batch, channels, height, width = x.size() x = x.unsqueeze(dim=1) x = self.conv(x) if self.use_dropout: x = self.dropout(x) x = x.view(batch, channels, height, width) return x class ConvGroupBlock(nn.Module): """ ChannelNet specific block with a combination of channel-wise convolution, depthwise separable group convolutions. Parameters: ---------- channels : int Number of input/output channels. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. 
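
    Examples:
    --------
    A minimal usage sketch; the channel count must be compatible with
    ``groups`` (here 64 and 2) so that the channel-wise 3D convolution
    preserves the number of channels:

    >>> import torch
    >>> block = ConvGroupBlock(channels=64, multi_blocks=2, groups=2, dropout_rate=0.0)
    >>> y = block(torch.randn(1, 64, 14, 14))
    >>> tuple(y.size())
    (1, 64, 14, 14)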
""" def __init__(self, channels, multi_blocks, groups, dropout_rate): super(ConvGroupBlock, self).__init__() self.conv = ChannelwiseConv2d( groups=groups, dropout_rate=dropout_rate) self.block = SimpleGroupBlock( channels=channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate) def forward(self, x): x = self.conv(x) x = self.block(x) return x class ChannetUnit(nn.Module): """ ChannelNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : tuple/list of 2 int Number of output channels for each sub-block. strides : int or tuple/list of 2 int Strides of the convolution. multi_blocks : int Number of DWS layers in the sequence. groups : int Number of groups. dropout_rate : float Dropout rate. block_names : tuple/list of 2 str Sub-block names. merge_type : str Type of sub-block output merging. """ def __init__(self, in_channels, out_channels_list, strides, multi_blocks, groups, dropout_rate, block_names, merge_type): super(ChannetUnit, self).__init__() assert (len(block_names) == 2) assert (merge_type in ["seq", "add", "cat"]) self.merge_type = merge_type self.blocks = nn.Sequential() for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)): stride_i = (strides if i == 0 else 1) if block_name == "channet_conv3x3": self.blocks.add_module("block{}".format(i + 1), channet_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride_i, dropout_rate=dropout_rate, activate=False)) elif block_name == "channet_dws_conv_block": self.blocks.add_module("block{}".format(i + 1), ChannetDwsConvBlock( in_channels=in_channels, out_channels=out_channels, stride=stride_i, dropout_rate=dropout_rate)) elif block_name == "simple_group_block": self.blocks.add_module("block{}".format(i + 1), SimpleGroupBlock( channels=in_channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate)) elif block_name == "conv_group_block": self.blocks.add_module("block{}".format(i + 1), ConvGroupBlock( channels=in_channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate)) else: raise NotImplementedError() in_channels = out_channels def forward(self, x): x_outs = [] for block in self.blocks._modules.values(): x = block(x) x_outs.append(x) if self.merge_type == "add": for i in range(len(x_outs) - 1): x = x + x_outs[i] elif self.merge_type == "cat": x = torch.cat(tuple(x_outs), dim=1) return x class ChannelNet(nn.Module): """ ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. Parameters: ---------- channels : list of list of list of int Number of output channels for each unit. block_names : list of list of list of str Names of blocks for each unit. block_names : list of list of str Merge types for each unit. dropout_rate : float, default 0.0001 Dropout rate. multi_blocks : int, default 2 Block count architectural parameter. groups : int, default 2 Group count architectural parameter. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, block_names, merge_types, dropout_rate=0.0001, multi_blocks=2, groups=2, in_channels=3, in_size=(224, 224), num_classes=1000): super(ChannelNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) else 1 stage.add_module("unit{}".format(j + 1), ChannetUnit( in_channels=in_channels, out_channels_list=out_channels, strides=strides, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate, block_names=block_names[i][j], merge_type=merge_types[i][j])) if merge_types[i][j] == "cat": in_channels = sum(out_channels) else: in_channels = out_channels[-1] self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_channelnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ChannelNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]] block_names = [[["channet_conv3x3", "channet_dws_conv_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]], [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]], [["channet_dws_conv_block", "channet_dws_conv_block"]]] merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]] net = ChannelNet( channels=channels, block_names=block_names, merge_types=merge_types, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def channelnet(**kwargs): """ ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,' https://arxiv.org/abs/1809.01330. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_channelnet(model_name="channelnet", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ channelnet, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != channelnet or weight_count == 3875112) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
18,471
29.633499
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/pnasnet.py
""" PNASNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559. """ __all__ = ['PNASNet', 'pnasnet5large'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1 from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock class PnasMaxPoolBlock(nn.Module): """ PNASNet specific Max pooling layer with extra padding. Parameters: ---------- stride : int or tuple/list of 2 int, default 2 Strides of the convolution. extra_padding : bool, default False Whether to use extra padding. """ def __init__(self, stride=2, extra_padding=False): super(PnasMaxPoolBlock, self).__init__() self.extra_padding = extra_padding self.pool = nn.MaxPool2d( kernel_size=3, stride=stride, padding=1) if self.extra_padding: self.pad = nn.ZeroPad2d(padding=(1, 0, 1, 0)) def forward(self, x): if self.extra_padding: x = self.pad(x) x = self.pool(x) if self.extra_padding: x = x[:, :, 1:, 1:].contiguous() return x def pnas_conv1x1(in_channels, out_channels, stride=1): """ 1x1 version of the PNASNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. """ return NasConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=1) class DwsBranch(nn.Module): """ PNASNet specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ def __init__(self, in_channels, out_channels, kernel_size, stride, extra_padding=False, stem=False): super(DwsBranch, self).__init__() assert (not stem) or (not extra_padding) mid_channels = out_channels if stem else in_channels padding = kernel_size // 2 self.conv1 = NasDwsConv( in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=padding, extra_padding=extra_padding) self.conv2 = NasDwsConv( in_channels=mid_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=padding) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def dws_branch_k3(in_channels, out_channels, stride=2, extra_padding=False, stem=False): """ 3x3 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Strides of the convolution. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, extra_padding=extra_padding, stem=stem) def dws_branch_k5(in_channels, out_channels, stride=2, extra_padding=False, stem=False): """ 5x5 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
stride : int or tuple/list of 2 int, default 2 Strides of the convolution. extra_padding : bool, default False Whether to use extra padding. stem : bool, default False Whether to use squeeze reduction if False. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=5, stride=stride, extra_padding=extra_padding, stem=stem) def dws_branch_k7(in_channels, out_channels, stride=2, extra_padding=False): """ 7x7 version of the PNASNet specific depthwise separable convolution branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Strides of the convolution. extra_padding : bool, default False Whether to use extra padding. """ return DwsBranch( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=stride, extra_padding=extra_padding, stem=False) class PnasMaxPathBlock(nn.Module): """ PNASNet specific `max path` auxiliary block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(PnasMaxPathBlock, self).__init__() self.maxpool = PnasMaxPoolBlock() self.conv = conv1x1( in_channels=in_channels, out_channels=out_channels) self.bn = nasnet_batch_norm(channels=out_channels) def forward(self, x): x = self.maxpool(x) x = self.conv(x) x = self.bn(x) return x class PnasBaseUnit(nn.Module): """ PNASNet base unit. """ def __init__(self): super(PnasBaseUnit, self).__init__() def cell_forward(self, x, x_prev): assert (hasattr(self, 'comb0_left')) x_left = x_prev x_right = x x0 = self.comb0_left(x_left) + self.comb0_right(x_left) x1 = self.comb1_left(x_right) + self.comb1_right(x_right) x2 = self.comb2_left(x_right) + self.comb2_right(x_right) x3 = self.comb3_left(x2) + self.comb3_right(x_right) x4 = self.comb4_left(x_left) + (self.comb4_right(x_right) if self.comb4_right else x_right) x_out = torch.cat((x0, x1, x2, x3, x4), dim=1) return x_out class Stem1Unit(PnasBaseUnit): """ PNASNet Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Stem1Unit, self).__init__() mid_channels = out_channels // 5 self.conv_1x1 = pnas_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.comb0_left = dws_branch_k5( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb0_right = PnasMaxPathBlock( in_channels=in_channels, out_channels=mid_channels) self.comb1_left = dws_branch_k7( in_channels=mid_channels, out_channels=mid_channels) self.comb1_right = PnasMaxPoolBlock() self.comb2_left = dws_branch_k5( in_channels=mid_channels, out_channels=mid_channels) self.comb2_right = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels) self.comb3_left = dws_branch_k3( in_channels=mid_channels, out_channels=mid_channels, stride=1) self.comb3_right = PnasMaxPoolBlock() self.comb4_left = dws_branch_k3( in_channels=in_channels, out_channels=mid_channels, stem=True) self.comb4_right = pnas_conv1x1( in_channels=mid_channels, out_channels=mid_channels, stride=2) def forward(self, x): x_prev = x x = self.conv_1x1(x) x_out = self.cell_forward(x, x_prev) return x_out class PnasUnit(PnasBaseUnit): """ PNASNet ordinary unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. 
    reduction : bool, default False
        Whether to use reduction.
    extra_padding : bool, default False
        Whether to use extra padding.
    match_prev_layer_dimensions : bool, default False
        Whether to match previous layer dimensions.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 reduction=False,
                 extra_padding=False,
                 match_prev_layer_dimensions=False):
        super(PnasUnit, self).__init__()
        mid_channels = out_channels // 5
        stride = 2 if reduction else 1

        if match_prev_layer_dimensions:
            self.conv_prev_1x1 = NasPathBlock(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
        else:
            self.conv_prev_1x1 = pnas_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels)

        self.conv_1x1 = pnas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels)

        self.comb0_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb0_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)

        self.comb1_left = dws_branch_k7(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb1_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)

        self.comb2_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        self.comb2_right = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)

        self.comb3_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=1)
        self.comb3_right = PnasMaxPoolBlock(
            stride=stride,
            extra_padding=extra_padding)

        self.comb4_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride,
            extra_padding=extra_padding)
        if reduction:
            self.comb4_right = pnas_conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride)
        else:
            self.comb4_right = None

    def forward(self, x, x_prev):
        # print("x.shape={}, x_prev.shape={}".format(x.shape, x_prev.shape))
        x_prev = self.conv_prev_1x1(x_prev)
        x = self.conv_1x1(x)
        x_out = self.cell_forward(x, x_prev)
        return x_out


class PNASNet(nn.Module):
    """
    PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem1_blocks_channels : int
        Number of output channels for the Stem1 unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
""" def __init__(self, channels, init_block_channels, stem1_blocks_channels, in_channels=3, in_size=(331, 331), num_classes=1000): super(PNASNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=2, last_ordinals=2) self.features.add_module("init_block", NASNetInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels self.features.add_module("stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=stem1_blocks_channels)) prev_in_channels = in_channels in_channels = stem1_blocks_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential() for j, out_channels in enumerate(channels_per_stage): reduction = (j == 0) extra_padding = (j == 0) and (i not in [0, 2]) match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0)) stage.add_module("unit{}".format(j + 1), PnasUnit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, reduction=reduction, extra_padding=extra_padding, match_prev_layer_dimensions=match_prev_layer_dimensions)) prev_in_channels = in_channels in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("activ", nn.ReLU()) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=11, stride=1)) self.output = nn.Sequential() self.output.add_module("dropout", nn.Dropout(p=0.5)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_pnasnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PNASNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ repeat = 4 init_block_channels = 96 stem_blocks_channels = [270, 540] norm_channels = [1080, 2160, 4320] channels = [[ci] * repeat for ci in norm_channels] stem1_blocks_channels = stem_blocks_channels[0] channels[0] = [stem_blocks_channels[1]] + channels[0] net = PNASNet( channels=channels, init_block_channels=init_block_channels, stem1_blocks_channels=stem1_blocks_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def pnasnet5large(**kwargs): """ PNASNet-5-Large model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pnasnet(model_name="pnasnet5large", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ pnasnet5large, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != pnasnet5large or weight_count == 86057668) x = torch.randn(1, 3, 331, 331) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
18,176
28.945634
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/efficientnetedge.py
""" EfficientNet-Edge for ImageNet-1K, implemented in PyTorch. Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. """ __all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b'] import os import math import torch.nn as nn import torch.nn.init as init from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock from .efficientnet import EffiInvResUnit, EffiInitBlock class EffiEdgeResUnit(nn.Module): """ EfficientNet-Edge edge residual unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. exp_factor : int Factor for expansion of channels. se_factor : int SE reduction factor for each unit. mid_from_in : bool Whether to use input channel count for middle channel count calculation. use_skip : bool Whether to use skip connection. bn_eps : float Small float added to variance in Batch norm. activation : str Name of activation function. """ def __init__(self, in_channels, out_channels, stride, exp_factor, se_factor, mid_from_in, use_skip, bn_eps, activation): super(EffiEdgeResUnit, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) and use_skip self.use_se = se_factor > 0 mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), mid_activation=activation) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, activation=None) def forward(self, x): if self.residual: identity = x x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class EfficientNetEdge(nn.Module): """ EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. kernel_sizes : list of list of int Number of kernel sizes for each unit. strides_per_stage : list int Stride value for the first unit of each stage. expansion_factors : list of list of int Number of expansion factors for each unit. dropout_rate : float, default 0.2 Fraction of the input units to drop. Must be a number between 0 and 1. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, kernel_sizes, strides_per_stage, expansion_factors, dropout_rate=0.2, tf_mode=False, bn_eps=1e-5, in_channels=3, in_size=(224, 224), num_classes=1000): super(EfficientNetEdge, self).__init__() self.in_size = in_size self.num_classes = num_classes activation = "relu" self.features = nn.Sequential() self.features.add_module("init_block", EffiInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): kernel_sizes_per_stage = kernel_sizes[i] expansion_factors_per_stage = expansion_factors[i] mid_from_in = (i != 0) use_skip = (i != 0) stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): kernel_size = kernel_sizes_per_stage[j] expansion_factor = expansion_factors_per_stage[j] stride = strides_per_stage[i] if (j == 0) else 1 if i < 3: stage.add_module("unit{}".format(j + 1), EffiEdgeResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, exp_factor=expansion_factor, se_factor=0, mid_from_in=mid_from_in, use_skip=use_skip, bn_eps=bn_eps, activation=activation)) else: stage.add_module("unit{}".format(j + 1), EffiInvResUnit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, exp_factor=expansion_factor, se_factor=0, bn_eps=bn_eps, activation=activation, tf_mode=tf_mode)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps, activation=activation)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AdaptiveAvgPool2d(output_size=1)) self.output = nn.Sequential() if dropout_rate > 0.0: self.output.add_module("dropout", nn.Dropout(p=dropout_rate)) self.output.add_module("fc", nn.Linear( in_features=in_channels, out_features=num_classes)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_efficientnet_edge(version, in_size, tf_mode=False, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create EfficientNet-Edge model with specific parameters. Parameters: ---------- version : str Version of EfficientNet ('small', 'medium', 'large'). in_size : tuple of two ints Spatial size of the expected input image. tf_mode : bool, default False Whether to use TF-like mode. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" dropout_rate = 0.0 if version == "small": assert (in_size == (224, 224)) depth_factor = 1.0 width_factor = 1.0 # dropout_rate = 0.2 elif version == "medium": assert (in_size == (240, 240)) depth_factor = 1.1 width_factor = 1.0 # dropout_rate = 0.2 elif version == "large": assert (in_size == (300, 300)) depth_factor = 1.4 width_factor = 1.2 # dropout_rate = 0.3 else: raise ValueError("Unsupported EfficientNet-Edge version {}".format(version)) init_block_channels = 32 layers = [1, 2, 4, 5, 4, 2] downsample = [1, 1, 1, 1, 0, 1] channels_per_layers = [24, 32, 48, 96, 144, 192] expansion_factors_per_layers = [4, 8, 8, 8, 8, 8] kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5] strides_per_stage = [1, 2, 2, 2, 1, 2] final_block_channels = 1280 layers = [int(math.ceil(li * depth_factor)) for li in layers] channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers] from functools import reduce channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), []) kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(kernel_sizes_per_layers, layers, downsample), []) expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(expansion_factors_per_layers, layers, downsample), []) strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(strides_per_stage, layers, downsample), []) strides_per_stage = [si[0] for si in strides_per_stage] init_block_channels = round_channels(init_block_channels * width_factor) if width_factor > 1.0: assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor)) final_block_channels = round_channels(final_block_channels * width_factor) net = EfficientNetEdge( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernel_sizes=kernel_sizes, strides_per_stage=strides_per_stage, expansion_factors=expansion_factors, dropout_rate=dropout_rate, tf_mode=tf_mode, bn_eps=bn_eps, in_size=in_size, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def efficientnet_edge_small_b(in_size=(224, 224), **kwargs): """ EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet_edge(version="small", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_small_b", **kwargs) def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs): """ EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (240, 240) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet_edge(version="medium", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_medium_b", **kwargs) def efficientnet_edge_large_b(in_size=(300, 300), **kwargs): """ EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_efficientnet_edge(version="large", in_size=in_size, tf_mode=True, bn_eps=1e-3, model_name="efficientnet_edge_large_b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ efficientnet_edge_small_b, efficientnet_edge_medium_b, efficientnet_edge_large_b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != efficientnet_edge_small_b or weight_count == 5438392) assert (model != efficientnet_edge_medium_b or weight_count == 6899496) assert (model != efficientnet_edge_large_b or weight_count == 10589712) x = torch.randn(1, 3, net.in_size[0], net.in_size[1]) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
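The reduce expressions in get_efficientnet_edge fold the flat per-layer configuration into per-stage nested lists: a new stage opens wherever downsample is 1, and a layer with downsample 0 is appended to the previous stage. Running the file's own lambda on the 'small' configuration (depth and width factors of 1.0) makes the result concrete:

from functools import reduce

layers = [1, 2, 4, 5, 4, 2]
downsample = [1, 1, 1, 1, 0, 1]
channels_per_layers = [24, 32, 48, 96, 144, 192]

channels = reduce(
    lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
    zip(channels_per_layers, layers, downsample), [])
print(channels)
# [[24], [32, 32], [48, 48, 48, 48],
#  [96, 96, 96, 96, 96, 144, 144, 144, 144], [192, 192]]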
14,866
35.799505
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/ibnresnext.py
""" IBN-ResNeXt for ImageNet-1K, implemented in PyTorch. Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431. """ __all__ = ['IBNResNeXt', 'ibn_resnext50_32x4d', 'ibn_resnext101_32x4d', 'ibn_resnext101_64x4d'] import os import math import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResInitBlock from .ibnresnet import ibn_conv1x1_block class IBNResNeXtBottleneck(nn.Module): """ IBN-ResNeXt bottleneck block for residual path in IBN-ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, conv1_ibn): super(IBNResNeXtBottleneck, self).__init__() mid_channels = out_channels // 4 D = int(math.floor(mid_channels * (bottleneck_width / 64.0))) group_width = cardinality * D self.conv1 = ibn_conv1x1_block( in_channels=in_channels, out_channels=group_width, use_ibn=conv1_ibn) self.conv2 = conv3x3_block( in_channels=group_width, out_channels=group_width, stride=stride, groups=cardinality) self.conv3 = conv1x1_block( in_channels=group_width, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class IBNResNeXtUnit(nn.Module): """ IBN-ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. conv1_ibn : bool Whether to use IBN normalization in the first convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, conv1_ibn): super(IBNResNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = IBNResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, conv1_ibn=conv1_ibn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class IBNResNeXt(nn.Module): """ IBN-ResNeXt model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), num_classes=1000): super(IBNResNeXt, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 conv1_ibn = (out_channels < 2048) stage.add_module("unit{}".format(j + 1), IBNResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width, conv1_ibn=conv1_ibn)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_ibnresnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create IBN-ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported IBN-ResNeXt with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = IBNResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def ibn_resnext50_32x4d(**kwargs): """ IBN-ResNeXt-50 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="ibn_resnext50_32x4d", **kwargs) def ibn_resnext101_32x4d(**kwargs): """ IBN-ResNeXt-101 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="ibn_resnext101_32x4d", **kwargs) def ibn_resnext101_64x4d(**kwargs): """ IBN-ResNeXt-101 (64x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_ibnresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="ibn_resnext101_64x4d", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ ibn_resnext50_32x4d, ibn_resnext101_32x4d, ibn_resnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != ibn_resnext50_32x4d or weight_count == 25028904) assert (model != ibn_resnext101_32x4d or weight_count == 44177704) assert (model != ibn_resnext101_64x4d or weight_count == 83455272) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
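As a worked example of the bottleneck width formula in IBNResNeXtBottleneck, for the 32x4d setting the group convolution width doubles with each stage (32 groups of 4, 8, 16 and 32 channels respectively):

import math

cardinality, bottleneck_width = 32, 4
for out_channels in [256, 512, 1024, 2048]:
    mid_channels = out_channels // 4
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    print(out_channels, cardinality * D)
# 256 128, 512 256, 1024 512, 2048 1024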
10,749
30.341108
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/squeezenext.py
""" SqueezeNext for ImageNet-1K, implemented in PyTorch. Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. """ __all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2'] import os import torch.nn as nn import torch.nn.init as init from .common import ConvBlock, conv1x1_block, conv7x7_block class SqnxtUnit(nn.Module): """ SqueezeNext unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(SqnxtUnit, self).__init__() if stride == 2: reduction_den = 1 self.resize_identity = True elif in_channels > out_channels: reduction_den = 4 self.resize_identity = True else: reduction_den = 2 self.resize_identity = False self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=(in_channels // reduction_den), stride=stride, bias=True) self.conv2 = conv1x1_block( in_channels=(in_channels // reduction_den), out_channels=(in_channels // (2 * reduction_den)), bias=True) self.conv3 = ConvBlock( in_channels=(in_channels // (2 * reduction_den)), out_channels=(in_channels // reduction_den), kernel_size=(1, 3), stride=1, padding=(0, 1), bias=True) self.conv4 = ConvBlock( in_channels=(in_channels // reduction_den), out_channels=(in_channels // reduction_den), kernel_size=(3, 1), stride=1, padding=(1, 0), bias=True) self.conv5 = conv1x1_block( in_channels=(in_channels // reduction_den), out_channels=out_channels, bias=True) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=True) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.conv5(x) x = x + identity x = self.activ(x) return x class SqnxtInitBlock(nn.Module): """ SqueezeNext specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(SqnxtInitBlock, self).__init__() self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2, padding=1, bias=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, ceil_mode=True) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class SqueezeNext(nn.Module): """ SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(SqueezeNext, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SqnxtInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SqnxtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bias=True)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_squeezenext(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SqueezeNext model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('23' or '23v5'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 64 final_block_channels = 128 channels_per_layers = [32, 64, 128, 256] if version == '23': layers = [6, 6, 8, 1] elif version == '23v5': layers = [2, 4, 14, 1] else: raise ValueError("Unsupported SqueezeNet version {}".format(version)) channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) final_block_channels = int(final_block_channels * width_scale) net = SqueezeNext( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sqnxt23_w1(**kwargs): """ 1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs) def sqnxt23_w3d2(**kwargs): """ 1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs) def sqnxt23_w2(**kwargs): """ 2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs) def sqnxt23v5_w1(**kwargs): """ 1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs) def sqnxt23v5_w3d2(**kwargs): """ 1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs) def sqnxt23v5_w2(**kwargs): """ 2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ sqnxt23_w1, sqnxt23_w3d2, sqnxt23_w2, sqnxt23v5_w1, sqnxt23v5_w3d2, sqnxt23v5_w2, ] for model in models: net = model(pretrained=pretrained) # net.eval() net.train() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sqnxt23_w1 or weight_count == 724056) assert (model != sqnxt23_w3d2 or weight_count == 1511824) assert (model != sqnxt23_w2 or weight_count == 2583752) assert (model != sqnxt23v5_w1 or weight_count == 921816) assert (model != sqnxt23v5_w3d2 or weight_count == 1953616) assert (model != sqnxt23v5_w2 or weight_count == 3366344) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
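A short trace of the branch logic in SqnxtUnit.__init__: the reduction denominator, and with it the channel pipeline of the five convolutions, depends on whether the unit downsamples or shrinks channels. The helper below is hypothetical (not part of the file) and only mirrors that selection:

def sqnxt_channels(in_channels, out_channels, stride):
    # Mirrors the reduction_den selection in SqnxtUnit.__init__.
    if stride == 2:
        den = 1
    elif in_channels > out_channels:
        den = 4
    else:
        den = 2
    return [in_channels // den,        # conv1: 1x1
            in_channels // (2 * den),  # conv2: 1x1
            in_channels // den,        # conv3: 1x3
            in_channels // den,        # conv4: 3x1
            out_channels]              # conv5: 1x1

print(sqnxt_channels(64, 32, 1))  # [16, 8, 16, 16, 32] -> channel shrink, den = 4
print(sqnxt_channels(32, 64, 2))  # [32, 16, 32, 32, 64] -> downsample, den = 1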
12,238
30.543814
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/xdensenet.py
""" X-DenseNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. """ __all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block', 'XDenseUnit'] import os import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .preresnet import PreResInitBlock, PreResActivation from .densenet import TransitionBlock class XConv2d(nn.Conv2d): """ X-Convolution layer. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. groups : int, default 1 Number of groups. expand_ratio : int, default 2 Ratio of expansion. """ def __init__(self, in_channels, out_channels, kernel_size, groups=1, expand_ratio=2, **kwargs): super(XConv2d, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups, **kwargs) self.expand_ratio = expand_ratio if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) grouped_in_channels = in_channels // groups self.mask = torch.nn.Parameter( data=torch.Tensor(out_channels, grouped_in_channels, *kernel_size), requires_grad=False) self.init_parameters() def init_parameters(self): shape = self.mask.shape expand_size = max(shape[1] // self.expand_ratio, 1) self.mask[:] = 0 for i in range(shape[0]): jj = torch.randperm(shape[1], device=self.mask.device)[:expand_size] self.mask[i, jj, :, :] = 1 def forward(self, input): masked_weight = self.weight.mul(self.mask) return F.conv2d( input=input, weight=masked_weight, bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) class PreXConvBlock(nn.Module): """ X-Convolution block with Batch normalization and ReLU pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. expand_ratio : int, default 2 Ratio of expansion. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, return_preact=False, activate=True, expand_ratio=2): super(PreXConvBlock, self).__init__() self.return_preact = return_preact self.activate = activate self.bn = nn.BatchNorm2d(num_features=in_channels) if self.activate: self.activ = nn.ReLU(inplace=True) self.conv = XConv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, expand_ratio=expand_ratio) def forward(self, x): x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_xconv1x1_block(in_channels, out_channels, stride=1, bias=False, return_preact=False, activate=True, expand_ratio=2): """ 1x1 version of the pre-activated x-convolution block. 
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    return PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)


def pre_xconv3x3_block(in_channels,
                       out_channels,
                       stride=1,
                       padding=1,
                       dilation=1,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    3x3 version of the pre-activated x-convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    return PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)


class XDenseUnit(nn.Module):
    """
    X-DenseNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 expand_ratio):
        super(XDenseUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size

        self.conv1 = pre_xconv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            expand_ratio=expand_ratio)
        self.conv2 = pre_xconv3x3_block(
            in_channels=mid_channels,
            out_channels=inc_channels,
            expand_ratio=expand_ratio)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = torch.cat((identity, x), dim=1)
        return x


class XDenseNet(nn.Module):
    """
    X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
""" def __init__(self, channels, init_block_channels, dropout_rate=0.0, expand_ratio=2, in_channels=3, in_size=(224, 224), num_classes=1000): super(XDenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), XDenseUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate, expand_ratio=expand_ratio)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_xdensenet(blocks, expand_ratio=2, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create X-DenseNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. expand_ratio : int, default 2 Ratio of expansion. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif blocks == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif blocks == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif blocks == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = XDenseNet( channels=channels, init_block_channels=init_block_channels, expand_ratio=expand_ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def xdensenet121_2(**kwargs): """ X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_xdensenet(blocks=121, model_name="xdensenet121_2", **kwargs) def xdensenet161_2(**kwargs): """ X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=161, model_name="xdensenet161_2", **kwargs) def xdensenet169_2(**kwargs): """ X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=169, model_name="xdensenet169_2", **kwargs) def xdensenet201_2(**kwargs): """ X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_xdensenet(blocks=201, model_name="xdensenet201_2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ xdensenet121_2, xdensenet161_2, xdensenet169_2, xdensenet201_2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != xdensenet121_2 or weight_count == 7978856) assert (model != xdensenet161_2 or weight_count == 28681000) assert (model != xdensenet169_2 or weight_count == 14149480) assert (model != xdensenet201_2 or weight_count == 20013928) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
16,251
30.015267
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/linknet.py
""" LinkNet for image segmentation, implemented in PyTorch. Original paper: 'LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation,' https://arxiv.org/abs/1707.03718. """ __all__ = ['LinkNet', 'linknet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1_block, conv3x3_block, deconv3x3_block, Hourglass, Identity from .resnet import resnet18 class DecoderStage(nn.Module): """ LinkNet specific decoder stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the deconvolution. out_padding : int or tuple/list of 2 int Output padding value for deconvolution layer. bias : bool, default False Whether the layer uses a bias vector. """ def __init__(self, in_channels, out_channels, stride, output_padding, bias): super(DecoderStage, self).__init__() mid_channels = in_channels // 4 self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bias=bias) self.conv2 = deconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, out_padding=output_padding, bias=bias) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bias=bias) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class LinkNetHead(nn.Module): """ LinkNet head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(LinkNetHead, self).__init__() mid_channels = in_channels // 2 self.conv1 = deconv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2, padding=1, out_padding=1, bias=True) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, bias=True) self.conv3 = nn.ConvTranspose2d( in_channels=mid_channels, out_channels=out_channels, kernel_size=2, stride=2, padding=0) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class LinkNet(nn.Module): """ LinkNet model from 'LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation,' https://arxiv.org/abs/1707.03718. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels form feature extractor. channels : list of int Number of output channels for the first unit of each stage. dilations : list of list of int Dilation values for each unit. dropout_rates : list of float Parameter of dropout layer for each stage. downs : list of int Whether to downscale or upscale in each stage. correct_size_mistmatch : bool Whether to correct downscaled sizes of images in encoder. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, backbone, backbone_out_channels, channels, strides, output_paddings, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(LinkNet, self).__init__() assert (in_channels == 3) assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size bias = False self.stem = backbone.init_block in_channels = backbone_out_channels down_seq = nn.Sequential() down_seq.add_module("down1", backbone.stage1) down_seq.add_module("down2", backbone.stage2) down_seq.add_module("down3", backbone.stage3) down_seq.add_module("down4", backbone.stage4) up_seq = nn.Sequential() skip_seq = nn.Sequential() for i, out_channels in enumerate(channels): up_seq.add_module("up{}".format(i + 1), DecoderStage( in_channels=in_channels, out_channels=out_channels, stride=strides[i], output_padding=output_paddings[i], bias=bias)) in_channels = out_channels skip_seq.add_module("skip{}".format(i + 1), Identity()) up_seq = up_seq[::-1] self.hg = Hourglass( down_seq=down_seq, up_seq=up_seq, skip_seq=skip_seq) self.head = LinkNetHead( in_channels=in_channels, out_channels=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.stem(x) x = self.hg(x) x = self.head(x) return x def get_linknet(backbone, backbone_out_channels, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create LinkNet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels form feature extractor. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [256, 128, 64, 64] strides = [2, 2, 2, 1] output_paddings = [1, 1, 1, 0] net = LinkNet( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, strides=strides, output_paddings=output_paddings, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def linknet_cityscapes(pretrained_backbone=False, num_classes=19, **kwargs): """ LinkNet model for Cityscapes from 'LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation,' https://arxiv.org/abs/1707.03718. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = resnet18(pretrained=pretrained_backbone).features del backbone[-1] backbone_out_channels = 512 return get_linknet(backbone=backbone, backbone_out_channels=backbone_out_channels, num_classes=num_classes, model_name="linknet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ linknet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != linknet_cityscapes or weight_count == 11535699) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
9,565
29.5623
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/diaresnet_cifar.py
""" DIA-ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. """ __all__ = ['CIFARDIAResNet', 'diaresnet20_cifar10', 'diaresnet20_cifar100', 'diaresnet20_svhn', 'diaresnet56_cifar10', 'diaresnet56_cifar100', 'diaresnet56_svhn', 'diaresnet110_cifar10', 'diaresnet110_cifar100', 'diaresnet110_svhn', 'diaresnet164bn_cifar10', 'diaresnet164bn_cifar100', 'diaresnet164bn_svhn', 'diaresnet1001_cifar10', 'diaresnet1001_cifar100', 'diaresnet1001_svhn', 'diaresnet1202_cifar10', 'diaresnet1202_cifar100', 'diaresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block, DualPathSequential from .diaresnet import DIAAttention, DIAResUnit class CIFARDIAResNet(nn.Module): """ DIA-ResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARDIAResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = DualPathSequential(return_two=False) attention = DIAAttention( in_x_features=channels_per_stage[0], in_h_features=channels_per_stage[0]) for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), DIAResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False, attention=attention)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_diaresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DIA-ResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARDIAResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def diaresnet20_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar10", **kwargs) def diaresnet20_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar100", **kwargs) def diaresnet20_svhn(num_classes=10, **kwargs): """ DIA-ResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="diaresnet20_svhn", **kwargs) def diaresnet56_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar10", **kwargs) def diaresnet56_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar100", **kwargs) def diaresnet56_svhn(num_classes=10, **kwargs): """ DIA-ResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="diaresnet56_svhn", **kwargs) def diaresnet110_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar10", **kwargs) def diaresnet110_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar100", **kwargs) def diaresnet110_svhn(num_classes=10, **kwargs): """ DIA-ResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="diaresnet110_svhn", **kwargs) def diaresnet164bn_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar10", **kwargs) def diaresnet164bn_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar100", **kwargs) def diaresnet164bn_svhn(num_classes=10, **kwargs): """ DIA-ResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_svhn", **kwargs) def diaresnet1001_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar10", **kwargs) def diaresnet1001_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar100", **kwargs) def diaresnet1001_svhn(num_classes=10, **kwargs): """ DIA-ResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_svhn", **kwargs) def diaresnet1202_cifar10(num_classes=10, **kwargs): """ DIA-ResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar10", **kwargs) def diaresnet1202_cifar100(num_classes=100, **kwargs): """ DIA-ResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diaresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar100", **kwargs) def diaresnet1202_svhn(num_classes=10, **kwargs): """ DIA-ResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diaresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (diaresnet20_cifar10, 10), (diaresnet20_cifar100, 100), (diaresnet20_svhn, 10), (diaresnet56_cifar10, 10), (diaresnet56_cifar100, 100), (diaresnet56_svhn, 10), (diaresnet110_cifar10, 10), (diaresnet110_cifar100, 100), (diaresnet110_svhn, 10), (diaresnet164bn_cifar10, 10), (diaresnet164bn_cifar100, 100), (diaresnet164bn_svhn, 10), (diaresnet1001_cifar10, 10), (diaresnet1001_cifar100, 100), (diaresnet1001_svhn, 10), (diaresnet1202_cifar10, 10), (diaresnet1202_cifar100, 100), (diaresnet1202_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != diaresnet20_cifar10 or weight_count == 286866) assert (model != diaresnet20_cifar100 or weight_count == 292716) assert (model != diaresnet20_svhn or weight_count == 286866) assert (model != diaresnet56_cifar10 or weight_count == 870162) assert (model != diaresnet56_cifar100 or weight_count == 876012) assert (model != diaresnet56_svhn or weight_count == 870162) assert (model != diaresnet110_cifar10 or weight_count == 1745106) assert (model != diaresnet110_cifar100 or weight_count == 1750956) assert (model != diaresnet110_svhn or weight_count == 1745106) assert (model != diaresnet164bn_cifar10 or weight_count == 1923002) assert (model != diaresnet164bn_cifar100 or weight_count == 1946132) assert (model != diaresnet164bn_svhn or weight_count == 1923002) assert (model != diaresnet1001_cifar10 or weight_count == 10547450) assert (model != diaresnet1001_cifar100 or weight_count == 10570580) assert (model != diaresnet1001_svhn or weight_count == 10547450) assert (model != diaresnet1202_cifar10 or weight_count == 19438418) assert (model != diaresnet1202_cifar100 or weight_count == 19444268) assert (model != diaresnet1202_svhn or weight_count == 19438418) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
19,959
35.489945
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resdropresnet_cifar.py
""" ResDrop-ResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. """ __all__ = ['CIFARResDropResNet', 'resdropresnet20_cifar10', 'resdropresnet20_cifar100', 'resdropresnet20_svhn'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block from .resnet import ResBlock, ResBottleneck class ResDropResUnit(nn.Module): """ ResDrop-ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. life_prob : float Residual branch life probability. """ def __init__(self, in_channels, out_channels, stride, bottleneck, life_prob): super(ResDropResUnit, self).__init__() self.life_prob = life_prob self.resize_identity = (in_channels != out_channels) or (stride != 1) body_class = ResBottleneck if bottleneck else ResBlock self.body = body_class( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if self.training: b = torch.bernoulli(torch.full((1,), self.life_prob, dtype=x.dtype, device=x.device)) x = float(b) / self.life_prob * x x = x + identity x = self.activ(x) return x class CIFARResDropResNet(nn.Module): """ ResDrop-ResNet model for CIFAR from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. life_probs : list of float Residual branch life probability for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, life_probs, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARResDropResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels k = 0 for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResDropResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, life_prob=life_probs[k])) in_channels = out_channels k += 1 self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resdropresnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResDrop-ResNet model for CIFAR with specific parameters. Parameters: ---------- classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 channels_per_layers = [16, 32, 64] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] total_layers = sum(layers) final_death_prob = 0.5 life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)] net = CIFARResDropResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, life_probs=life_probs, num_classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resdropresnet20_cifar10(classes=10, **kwargs): """ ResDrop-ResNet-20 model for CIFAR-10 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar10", **kwargs) def resdropresnet20_cifar100(classes=100, **kwargs): """ ResDrop-ResNet-20 model for CIFAR-100 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar100", **kwargs) def resdropresnet20_svhn(classes=10, **kwargs): """ ResDrop-ResNet-20 model for SVHN from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (resdropresnet20_cifar10, 10), (resdropresnet20_cifar100, 100), (resdropresnet20_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resdropresnet20_cifar10 or weight_count == 272474) assert (model != resdropresnet20_cifar100 or weight_count == 278324) assert (model != resdropresnet20_svhn or weight_count == 272474) x = torch.randn(14, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (14, num_classes)) if __name__ == "__main__": _test()
9,918
31.735974
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/bisenet.py
""" BiSeNet for CelebAMask-HQ, implemented in PyTorch. Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897. """ __all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq'] import os import torch import torch.nn as nn from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential from .resnet import resnet18 class PyramidPoolingZeroBranch(nn.Module): """ Pyramid pooling zero branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. in_size : tuple of 2 int Spatial size of output image for the upsampling operation. """ def __init__(self, in_channels, out_channels, in_size): super(PyramidPoolingZeroBranch, self).__init__() self.in_size = in_size self.pool = nn.AdaptiveAvgPool2d(1) self.conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.up = InterpolationBlock( scale_factor=None, mode="nearest", align_corners=None) def forward(self, x): in_size = self.in_size if self.in_size is not None else x.shape[2:] x = self.pool(x) x = self.conv(x) x = self.up(x, size=in_size) return x class AttentionRefinementBlock(nn.Module): """ Attention refinement block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(AttentionRefinementBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels) self.pool = nn.AdaptiveAvgPool2d(1) self.conv2 = conv1x1_block( in_channels=out_channels, out_channels=out_channels, activation=(lambda: nn.Sigmoid())) def forward(self, x): x = self.conv1(x) w = self.pool(x) w = self.conv2(w) x = x * w return x class PyramidPoolingMainBranch(nn.Module): """ Pyramid pooling main branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. scale_factor : float Multiplier for spatial size. """ def __init__(self, in_channels, out_channels, scale_factor): super(PyramidPoolingMainBranch, self).__init__() self.att = AttentionRefinementBlock( in_channels=in_channels, out_channels=out_channels) self.up = InterpolationBlock( scale_factor=scale_factor, mode="nearest", align_corners=None) self.conv = conv3x3_block( in_channels=out_channels, out_channels=out_channels) def forward(self, x, y): x = self.att(x) x = x + y x = self.up(x) x = self.conv(x) return x class FeatureFusion(nn.Module): """ Feature fusion block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. reduction : int, default 4 Squeeze reduction value. """ def __init__(self, in_channels, out_channels, reduction=4): super(FeatureFusion, self).__init__() mid_channels = out_channels // reduction self.conv_merge = conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.pool = nn.AdaptiveAvgPool2d(1) self.conv1 = conv1x1( in_channels=out_channels, out_channels=mid_channels) self.activ = nn.ReLU(inplace=True) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=out_channels) self.sigmoid = nn.Sigmoid() def forward(self, x, y): x = torch.cat((x, y), dim=1) x = self.conv_merge(x) w = self.pool(x) w = self.conv1(w) w = self.activ(w) w = self.conv2(w) w = self.sigmoid(w) x_att = x * w x = x + x_att return x class PyramidPooling(nn.Module): """ Pyramid Pooling module. Parameters: ---------- x16_in_channels : int Number of input channels for x16. 
    x32_in_channels : int
        Number of input channels for x32.
    y_out_channels : int
        Number of output channels for y-outputs.
    y32_out_size : tuple of 2 int
        Spatial size of the y32 tensor.
    """
    def __init__(self,
                 x16_in_channels,
                 x32_in_channels,
                 y_out_channels,
                 y32_out_size):
        super(PyramidPooling, self).__init__()
        z_out_channels = 2 * y_out_channels

        self.pool32 = PyramidPoolingZeroBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            in_size=y32_out_size)
        self.pool16 = PyramidPoolingMainBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            scale_factor=2)
        self.pool8 = PyramidPoolingMainBranch(
            in_channels=x16_in_channels,
            out_channels=y_out_channels,
            scale_factor=2)
        self.fusion = FeatureFusion(
            in_channels=z_out_channels,
            out_channels=z_out_channels)

    def forward(self, x8, x16, x32):
        y32 = self.pool32(x32)
        y16 = self.pool16(x32, y32)
        y8 = self.pool8(x16, y16)
        z8 = self.fusion(x8, y8)
        return z8, y8, y16


class BiSeHead(nn.Module):
    """
    BiSeNet head (final) block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels):
        super(BiSeHead, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x


class BiSeNet(nn.Module):
    """
    BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    backbone : func -> nn.Sequential
        Feature extractor.
    aux : bool, default True
        Whether to output auxiliary results.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 480)
        Spatial size of the expected input image.
    num_classes : int, default 19
        Number of segmentation classes.
""" def __init__(self, backbone, aux=True, fixed_size=True, in_channels=3, in_size=(640, 480), num_classes=19): super(BiSeNet, self).__init__() assert (in_channels == 3) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.backbone, backbone_out_channels = backbone() y_out_channels = backbone_out_channels[0] z_out_channels = 2 * y_out_channels y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None self.pool = PyramidPooling( x16_in_channels=backbone_out_channels[1], x32_in_channels=backbone_out_channels[2], y_out_channels=y_out_channels, y32_out_size=y32_out_size) self.head_z8 = BiSeHead( in_channels=z_out_channels, mid_channels=z_out_channels, out_channels=num_classes) self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None)) if self.aux: mid_channels = y_out_channels // 2 self.head_y8 = BiSeHead( in_channels=y_out_channels, mid_channels=mid_channels, out_channels=num_classes) self.head_y16 = BiSeHead( in_channels=y_out_channels, mid_channels=mid_channels, out_channels=num_classes) self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, a=1) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0) x8, x16, x32 = self.backbone(x) z8, y8, y16 = self.pool(x8, x16, x32) z8 = self.head_z8(z8) z8 = self.up8(z8) if self.aux: y8 = self.head_y8(y8) y16 = self.head_y16(y16) y8 = self.up8(y8) y16 = self.up16(y16) return z8, y8, y16 else: return z8 def get_bisenet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create BiSeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = BiSeNet( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, num_classes=19, **kwargs): """ BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" def backbone(): features_raw = resnet18(pretrained=pretrained_backbone).features del features_raw[-1] features = MultiOutputSequential(return_last=False) features.add_module("init_block", features_raw[0]) for i, stage in enumerate(features_raw[1:]): if i != 0: stage.do_output = True features.add_module("stage{}".format(i + 1), stage) out_channels = [128, 256, 512] return features, out_channels return get_bisenet(backbone=backbone, num_classes=num_classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (640, 480) aux = True pretrained = False models = [ bisenet_resnet18_celebamaskhq, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) if aux: assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13300416) else: assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13150272) batch = 1 x = torch.randn(batch, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys # y.sum().backward() assert (tuple(y.size()) == (batch, 19, in_size[0], in_size[1])) if __name__ == "__main__": _test()
13,181
28.959091
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnet.py
""" ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2', 'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b', 'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck', 'ResUnit', 'ResInitBlock'] import os import torch.nn as nn from .common import conv1x1_block, conv3x3_block, conv7x7_block class ResBlock(nn.Module): """ Simple ResNet block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(ResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class ResBottleneck(nn.Module): """ ResNet bottleneck block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, conv1_stride=False, bottleneck_factor=4): super(ResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), padding=padding, dilation=dilation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResUnit(nn.Module): """ ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(ResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class ResInitBlock(nn.Module): """ ResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ResInitBlock, self).__init__() self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ResNet(nn.Module): """ ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnet10(**kwargs): """ ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=10, model_name="resnet10", **kwargs) def resnet12(**kwargs): """ ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=12, model_name="resnet12", **kwargs) def resnet14(**kwargs): """ ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, model_name="resnet14", **kwargs) def resnetbc14b(**kwargs): """ ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs) def resnet16(**kwargs): """ ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=16, model_name="resnet16", **kwargs) def resnet18_wd4(**kwargs): """ ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs) def resnet18_wd2(**kwargs): """ ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs) def resnet18_w3d4(**kwargs): """ ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs) def resnet18(**kwargs): """ ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="resnet18", **kwargs) def resnet26(**kwargs): """ ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs) def resnetbc26b(**kwargs): """ ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs) def resnet34(**kwargs): """ ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=34, model_name="resnet34", **kwargs) def resnetbc38b(**kwargs): """ ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs) def resnet50(**kwargs): """ ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="resnet50", **kwargs) def resnet50b(**kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs) def resnet101(**kwargs): """ ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="resnet101", **kwargs) def resnet101b(**kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs) def resnet152(**kwargs): """ ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, model_name="resnet152", **kwargs) def resnet152b(**kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs) def resnet200(**kwargs): """ ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=200, model_name="resnet200", **kwargs) def resnet200b(**kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resnet10, resnet12, resnet14, resnetbc14b, resnet16, resnet18_wd4, resnet18_wd2, resnet18_w3d4, resnet18, resnet26, resnetbc26b, resnet34, resnetbc38b, resnet50, resnet50b, resnet101, resnet101b, resnet152, resnet152b, resnet200, resnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10 or weight_count == 5418792) assert (model != resnet12 or weight_count == 5492776) assert (model != resnet14 or weight_count == 5788200) assert (model != resnetbc14b or weight_count == 10064936) assert (model != resnet16 or weight_count == 6968872) assert (model != resnet18_wd4 or weight_count == 3937400) assert (model != resnet18_wd2 or weight_count == 5804296) assert (model != resnet18_w3d4 or weight_count == 8476056) assert (model != resnet18 or weight_count == 11689512) assert (model != resnet26 or weight_count == 17960232) assert (model != resnetbc26b or weight_count == 15995176) assert (model != resnet34 or weight_count == 21797672) assert (model != resnetbc38b or weight_count == 21925416) assert (model != resnet50 or weight_count == 25557032) assert (model != resnet50b or weight_count == 25557032) assert (model != resnet101 or weight_count == 44549160) assert (model != resnet101b or weight_count == 44549160) assert (model != resnet152 or weight_count == 60192808) assert (model != resnet152b or weight_count == 60192808) assert (model != resnet200 or weight_count == 64673832) assert (model != resnet200b or weight_count == 64673832) batch = 4 x = torch.randn(batch, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (batch, 1000)) if __name__ == "__main__": _test()
25,346
31.579692
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/simpleposemobile_coco.py
""" SimplePose(Mobile) for COCO Keypoint, implemented in PyTorch. Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. """ __all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco', 'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco', 'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco'] import os import torch import torch.nn as nn from .common import conv1x1, DucBlock, HeatmapMaxDetBlock from .resnet import resnet18, resnet50b from .mobilenet import mobilenet_w1 from .mobilenetv2 import mobilenetv2b_w1 from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1 class SimplePoseMobile(nn.Module): """ SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. decoder_init_block_channels : int Number of output channels for the initial unit of the decoder. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. """ def __init__(self, backbone, backbone_out_channels, channels, decoder_init_block_channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17): super(SimplePoseMobile, self).__init__() assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap self.backbone = backbone self.decoder = nn.Sequential() in_channels = backbone_out_channels self.decoder.add_module("init_block", conv1x1( in_channels=in_channels, out_channels=decoder_init_block_channels)) in_channels = decoder_init_block_channels for i, out_channels in enumerate(channels): self.decoder.add_module("unit{}".format(i + 1), DucBlock( in_channels=in_channels, out_channels=out_channels, scale_factor=2)) in_channels = out_channels self.decoder.add_module("final_block", conv1x1( in_channels=in_channels, out_channels=keypoints)) self.heatmap_max_det = HeatmapMaxDetBlock() self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.backbone(x) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_simpleposemobile(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SimplePose(Mobile) model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" channels = [128, 64, 32] decoder_init_block_channels = 256 net = SimplePoseMobile( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, decoder_init_block_channels=decoder_init_block_channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet18(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=512, keypoints=keypoints, model_name="simplepose_mobile_resnet18_coco", **kwargs) def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnet50b(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="simplepose_mobile_resnet50b_coco", **kwargs) def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of 1.0 MobileNet-224 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = mobilenet_w1(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=1024, keypoints=keypoints, model_name="simplepose_mobile_mobilenet_w1_coco", **kwargs) def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of 1.0 MobileNetV2b-224 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = mobilenetv2b_w1(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=1280, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv2b_w1_coco", **kwargs) def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of MobileNetV3 Small 224/1.0 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = mobilenetv3_small_w1(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=576, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv3_small_w1_coco", **kwargs) def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ SimplePose(Mobile) model on the base of MobileNetV3 Large 224/1.0 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" backbone = mobilenetv3_large_w1(pretrained=pretrained_backbone).features del backbone[-1] return get_simpleposemobile(backbone=backbone, backbone_out_channels=960, keypoints=keypoints, model_name="simplepose_mobile_mobilenetv3_large_w1_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ simplepose_mobile_resnet18_coco, simplepose_mobile_resnet50b_coco, simplepose_mobile_mobilenet_w1_coco, simplepose_mobile_mobilenetv2b_w1_coco, simplepose_mobile_mobilenetv3_small_w1_coco, simplepose_mobile_mobilenetv3_large_w1_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != simplepose_mobile_resnet18_coco or weight_count == 12858208) assert (model != simplepose_mobile_resnet50b_coco or weight_count == 25582944) assert (model != simplepose_mobile_mobilenet_w1_coco or weight_count == 5019744) assert (model != simplepose_mobile_mobilenetv2b_w1_coco or weight_count == 4102176) assert (model != simplepose_mobile_mobilenetv3_small_w1_coco or weight_count == 2625088) assert (model != simplepose_mobile_mobilenetv3_large_w1_coco or weight_count == 4768336) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
12,743
37.735562
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/cbamresnet.py
""" CBAM-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. """ __all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv7x7_block from .resnet import ResInitBlock, ResBlock, ResBottleneck class MLP(nn.Module): """ Multilayer perceptron block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(MLP, self).__init__() mid_channels = channels // reduction_ratio self.fc1 = nn.Linear( in_features=channels, out_features=mid_channels) self.activ = nn.ReLU(inplace=True) self.fc2 = nn.Linear( in_features=mid_channels, out_features=channels) def forward(self, x): x = x.view(x.size(0), -1) x = self.fc1(x) x = self.activ(x) x = self.fc2(x) return x class ChannelGate(nn.Module): """ CBAM channel gate block. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(ChannelGate, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.max_pool = nn.AdaptiveMaxPool2d(output_size=(1, 1)) self.mlp = MLP( channels=channels, reduction_ratio=reduction_ratio) self.sigmoid = nn.Sigmoid() def forward(self, x): att1 = self.avg_pool(x) att1 = self.mlp(att1) att2 = self.max_pool(x) att2 = self.mlp(att2) att = att1 + att2 att = self.sigmoid(att) att = att.unsqueeze(2).unsqueeze(3).expand_as(x) x = x * att return x class SpatialGate(nn.Module): """ CBAM spatial gate block. """ def __init__(self): super(SpatialGate, self).__init__() self.conv = conv7x7_block( in_channels=2, out_channels=1, activation=None) self.sigmoid = nn.Sigmoid() def forward(self, x): att1 = x.max(dim=1)[0].unsqueeze(1) att2 = x.mean(dim=1).unsqueeze(1) att = torch.cat((att1, att2), dim=1) att = self.conv(att) att = self.sigmoid(att) x = x * att return x class CbamBlock(nn.Module): """ CBAM attention block for CBAM-ResNet. Parameters: ---------- channels : int Number of input/output channels. reduction_ratio : int, default 16 Channel reduction ratio. """ def __init__(self, channels, reduction_ratio=16): super(CbamBlock, self).__init__() self.ch_gate = ChannelGate( channels=channels, reduction_ratio=reduction_ratio) self.sp_gate = SpatialGate() def forward(self, x): x = self.ch_gate(x) x = self.sp_gate(x) return x class CbamResUnit(nn.Module): """ CBAM-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. 
""" def __init__(self, in_channels, out_channels, stride, bottleneck): super(CbamResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=False) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.cbam = CbamBlock(channels=out_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.cbam(x) x = x + identity x = self.activ(x) return x class CbamResNet(nn.Module): """ CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), num_classes=1000): super(CbamResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), CbamResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create CBAM-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. use_se : bool Whether to use SE block. width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] else: raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 if blocks < 50: channels_per_layers = [64, 128, 256, 512] bottleneck = False else: channels_per_layers = [256, 512, 1024, 2048] bottleneck = True channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = CbamResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def cbam_resnet18(**kwargs): """ CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs) def cbam_resnet34(**kwargs): """ CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs) def cbam_resnet50(**kwargs): """ CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs) def cbam_resnet101(**kwargs): """ CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs) def cbam_resnet152(**kwargs): """ CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ cbam_resnet18, cbam_resnet34, cbam_resnet50, cbam_resnet101, cbam_resnet152, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != cbam_resnet18 or weight_count == 11779392) assert (model != cbam_resnet34 or weight_count == 21960468) assert (model != cbam_resnet50 or weight_count == 28089624) assert (model != cbam_resnet101 or weight_count == 49330172) assert (model != cbam_resnet152 or weight_count == 66826848) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,908
28.405467
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/diracnetv2.py
""" DiracNetV2 for ImageNet-1K, implemented in PyTorch. Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. """ __all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2'] import os import torch.nn as nn import torch.nn.init as init class DiracConv(nn.Module): """ DiracNetV2 specific convolution block with pre-activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding): super(DiracConv, self).__init__() self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True) def forward(self, x): x = self.activ(x) x = self.conv(x) return x def dirac_conv3x3(in_channels, out_channels): """ 3x3 version of the DiracNetV2 specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return DiracConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1) class DiracInitBlock(nn.Module): """ DiracNetV2 specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DiracInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, padding=3, bias=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class DiracNetV2(nn.Module): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(DiracNetV2, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", DiracInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), dirac_conv3x3( in_channels=in_channels, out_channels=out_channels)) in_channels = out_channels if i != len(channels) - 1: stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=2, stride=2, padding=0)) self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_activ", nn.ReLU(inplace=True)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_diracnetv2(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DiracNetV2 model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 18: layers = [4, 4, 4, 4] elif blocks == 34: layers = [6, 8, 12, 6] else: raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks)) channels_per_layers = [64, 128, 256, 512] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] init_block_channels = 64 net = DiracNetV2( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def diracnet18v2(**kwargs): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_diracnetv2(blocks=18, model_name="diracnet18v2", **kwargs) def diracnet34v2(**kwargs): """ DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_diracnetv2(blocks=34, model_name="diracnet34v2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ diracnet18v2, diracnet34v2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != diracnet18v2 or weight_count == 11511784) assert (model != diracnet34v2 or weight_count == 21616232) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
8,444
27.72449
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/sepreresnet_cifar.py
""" SE-PreResNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn', 'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn', 'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn', 'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn', 'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn', 'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn', 'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn', 'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .sepreresnet import SEPreResUnit class CIFARSEPreResNet(nn.Module): """ SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification num_classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARSEPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sepreresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SE-PreResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification num_classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARSEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sepreresnet20_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar10", **kwargs) def sepreresnet20_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar100", **kwargs) def sepreresnet20_svhn(num_classes=10, **kwargs): """ SE-PreResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_svhn", **kwargs) def sepreresnet56_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar10", **kwargs) def sepreresnet56_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar100", **kwargs) def sepreresnet56_svhn(num_classes=10, **kwargs): """ SE-PreResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_svhn", **kwargs) def sepreresnet110_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar10", **kwargs) def sepreresnet110_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar100", **kwargs) def sepreresnet110_svhn(num_classes=10, **kwargs): """ SE-PreResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_svhn", **kwargs) def sepreresnet164bn_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar10", **kwargs) def sepreresnet164bn_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar100", **kwargs) def sepreresnet164bn_svhn(num_classes=10, **kwargs): """ SE-PreResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_svhn", **kwargs) def sepreresnet272bn_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar10", **kwargs) def sepreresnet272bn_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar100", **kwargs) def sepreresnet272bn_svhn(num_classes=10, **kwargs): """ SE-PreResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_svhn", **kwargs) def sepreresnet542bn_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar10", **kwargs) def sepreresnet542bn_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar100", **kwargs) def sepreresnet542bn_svhn(num_classes=10, **kwargs): """ SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_svhn", **kwargs) def sepreresnet1001_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar10", **kwargs) def sepreresnet1001_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar100", **kwargs) def sepreresnet1001_svhn(num_classes=10, **kwargs): """ SE-PreResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_svhn", **kwargs) def sepreresnet1202_cifar10(num_classes=10, **kwargs): """ SE-PreResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar10", **kwargs) def sepreresnet1202_cifar100(num_classes=100, **kwargs): """ SE-PreResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 100 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_sepreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar100", **kwargs) def sepreresnet1202_svhn(num_classes=10, **kwargs): """ SE-PreResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 10 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sepreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (sepreresnet20_cifar10, 10), (sepreresnet20_cifar100, 100), (sepreresnet20_svhn, 10), (sepreresnet56_cifar10, 10), (sepreresnet56_cifar100, 100), (sepreresnet56_svhn, 10), (sepreresnet110_cifar10, 10), (sepreresnet110_cifar100, 100), (sepreresnet110_svhn, 10), (sepreresnet164bn_cifar10, 10), (sepreresnet164bn_cifar100, 100), (sepreresnet164bn_svhn, 10), (sepreresnet272bn_cifar10, 10), (sepreresnet272bn_cifar100, 100), (sepreresnet272bn_svhn, 10), (sepreresnet542bn_cifar10, 10), (sepreresnet542bn_cifar100, 100), (sepreresnet542bn_svhn, 10), (sepreresnet1001_cifar10, 10), (sepreresnet1001_cifar100, 100), (sepreresnet1001_svhn, 10), (sepreresnet1202_cifar10, 10), (sepreresnet1202_cifar100, 100), (sepreresnet1202_svhn, 10), ] for model, num_num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != sepreresnet20_cifar10 or weight_count == 274559) assert (model != sepreresnet20_cifar100 or weight_count == 280409) assert (model != sepreresnet20_svhn or weight_count == 274559) assert (model != sepreresnet56_cifar10 or weight_count == 862601) assert (model != sepreresnet56_cifar100 or weight_count == 868451) assert (model != sepreresnet56_svhn or weight_count == 862601) assert (model != sepreresnet110_cifar10 or weight_count == 1744664) assert (model != sepreresnet110_cifar100 or weight_count == 1750514) assert (model != sepreresnet110_svhn or weight_count == 1744664) assert (model != sepreresnet164bn_cifar10 or weight_count == 1904882) assert (model != sepreresnet164bn_cifar100 or weight_count == 1928012) assert (model != sepreresnet164bn_svhn or weight_count == 1904882) assert (model != sepreresnet272bn_cifar10 or weight_count == 3152450) assert (model != sepreresnet272bn_cifar100 or weight_count == 3175580) assert (model != sepreresnet272bn_svhn or weight_count == 3152450) assert (model != sepreresnet542bn_cifar10 or weight_count == 6271370) assert (model != sepreresnet542bn_cifar100 or weight_count == 6294500) assert (model != sepreresnet542bn_svhn or weight_count == 6271370) assert (model != sepreresnet1001_cifar10 or weight_count == 11573534) assert (model != sepreresnet1001_cifar100 or weight_count == 11596664) assert (model != sepreresnet1001_svhn or weight_count == 11573534) assert (model != sepreresnet1202_cifar10 or weight_count == 19581938) assert (model != sepreresnet1202_cifar100 or weight_count == 19587788) assert (model != sepreresnet1202_svhn or weight_count == 19581938) x = torch.randn(1, 3, 32, 32) y = net(x) 
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))


if __name__ == "__main__":
    _test()
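
# --- Editorial usage sketch (not part of the original file) ---
# All factories above share one pattern; the depth must satisfy
# blocks = 6 * n + 2 for basic units or blocks = 9 * n + 2 for bottleneck
# units, as asserted in `get_sepreresnet_cifar` (e.g. 56 = 6 * 9 + 2).
def _usage_example():
    import torch
    net = sepreresnet56_cifar100(num_classes=100)
    net.eval()
    with torch.no_grad():
        y = net(torch.randn(1, 3, 32, 32))
    assert tuple(y.size()) == (1, 100)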
24,663
37.298137
119
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/danet.py
""" DANet for image segmentation, implemented in Gluon. Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. """ __all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes', 'ScaleBlock'] import os import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from torch.nn.parameter import Parameter from .common import conv1x1, conv3x3_block from .resnetd import resnetd50b, resnetd101b class ScaleBlock(nn.Module): """ Simple scale block. """ def __init__(self): super(ScaleBlock, self).__init__() self.alpha = Parameter(torch.Tensor((1,))) def forward(self, x): return self.alpha * x def __repr__(self): s = '{name}(alpha={alpha})' return s.format( name=self.__class__.__name__, gamma=self.alpha.shape[0]) def calc_flops(self, x): assert (x.shape[0] == 1) num_flops = x.numel() num_macs = 0 return num_flops, num_macs class PosAttBlock(nn.Module): """ Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. It captures long-range spatial contextual information. Parameters: ---------- channels : int Number of channels. reduction : int, default 8 Squeeze reduction value. """ def __init__(self, channels, reduction=8): super(PosAttBlock, self).__init__() mid_channels = channels // reduction self.query_conv = conv1x1( in_channels=channels, out_channels=mid_channels, bias=True) self.key_conv = conv1x1( in_channels=channels, out_channels=mid_channels, bias=True) self.value_conv = conv1x1( in_channels=channels, out_channels=channels, bias=True) self.scale = ScaleBlock() self.softmax = nn.Softmax(dim=-1) def forward(self, x): batch, channels, height, width = x.shape proj_query = self.query_conv(x).view((batch, -1, height * width)) proj_key = self.key_conv(x).view((batch, -1, height * width)) proj_value = self.value_conv(x).view((batch, -1, height * width)) energy = proj_query.transpose(1, 2).contiguous().bmm(proj_key) w = self.softmax(energy) y = proj_value.bmm(w.transpose(1, 2).contiguous()) y = y.reshape((batch, -1, height, width)) y = self.scale(y) + x return y class ChaAttBlock(nn.Module): """ Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. It explicitly models interdependencies between channels. """ def __init__(self): super(ChaAttBlock, self).__init__() self.scale = ScaleBlock() self.softmax = nn.Softmax(dim=-1) def forward(self, x): batch, channels, height, width = x.shape proj_query = x.view((batch, -1, height * width)) proj_key = x.view((batch, -1, height * width)) proj_value = x.view((batch, -1, height * width)) energy = proj_query.bmm(proj_key.transpose(1, 2).contiguous()) energy_max, _ = energy.max(dim=-1, keepdims=True) energy_new = energy_max.expand_as(energy) - energy w = self.softmax(energy_new) y = w.bmm(proj_value) y = y.reshape((batch, -1, height, width)) y = self.scale(y) + x return y class DANetHeadBranch(nn.Module): """ DANet head branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. pose_att : bool, default True Whether to use position attention instead of channel one. 
""" def __init__(self, in_channels, out_channels, pose_att=True): super(DANetHeadBranch, self).__init__() mid_channels = in_channels // 4 dropout_rate = 0.1 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels) if pose_att: self.att = PosAttBlock(mid_channels) else: self.att = ChaAttBlock() self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) self.dropout = nn.Dropout(p=dropout_rate, inplace=False) def forward(self, x): x = self.conv1(x) x = self.att(x) y = self.conv2(x) x = self.conv3(y) x = self.dropout(x) return x, y class DANetHead(nn.Module): """ DANet head block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(DANetHead, self).__init__() mid_channels = in_channels // 4 dropout_rate = 0.1 self.branch_pa = DANetHeadBranch( in_channels=in_channels, out_channels=out_channels, pose_att=True) self.branch_ca = DANetHeadBranch( in_channels=in_channels, out_channels=out_channels, pose_att=False) self.conv = conv1x1( in_channels=mid_channels, out_channels=out_channels, bias=True) self.dropout = nn.Dropout(p=dropout_rate, inplace=False) def forward(self, x): pa_x, pa_y = self.branch_pa(x) ca_x, ca_y = self.branch_ca(x) y = pa_y + ca_y x = self.conv(y) x = self.dropout(x) return x, pa_x, ca_x class DANet(nn.Module): """ DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int, default 2048 Number of output channels form feature extractor. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default True Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (480, 480) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. """ def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=19): super(DANet, self).__init__() assert (in_channels > 0) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.aux = aux self.fixed_size = fixed_size self.backbone = backbone self.head = DANetHead( in_channels=backbone_out_channels, out_channels=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] x, _ = self.backbone(x) x, y, z = self.head(x) x = F.interpolate(x, size=in_size, mode="bilinear", align_corners=True) if self.aux: y = F.interpolate(y, size=in_size, mode="bilinear", align_corners=True) z = F.interpolate(z, size=in_size, mode="bilinear", align_corners=True) return x, y, z else: return x def get_danet(backbone, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DANet model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. num_classes : int Number of segmentation classes. aux : bool, default False Whether to output an auxiliary result. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = DANet( backbone=backbone, num_classes=num_classes, aux=aux, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def danet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_danet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="danet_resnetd50b_cityscapes", **kwargs) def danet_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs): """ DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983. Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. num_classes : int, default 19 Number of segmentation classes. aux : bool, default True Whether to output an auxiliary result. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features del backbone[-1] return get_danet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="danet_resnetd101b_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch in_size = (480, 480) aux = True pretrained = False models = [ danet_resnetd50b_cityscapes, danet_resnetd101b_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, aux=aux) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427) assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555) batch = 2 num_classes = 19 x = torch.randn(batch, 3, in_size[0], in_size[1]) ys = net(x) y = ys[0] if aux else ys y.sum().backward() assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3))) if __name__ == "__main__": _test()
12,721
30.568238
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mobilenetv2.py
""" MobileNetV2 for ImageNet-1K, implemented in PyTorch. Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. """ __all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1', 'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block class LinearBottleneck(nn.Module): """ So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. expansion : bool Whether do expansion of channels. remove_exp_conv : bool Whether to remove expansion convolution. """ def __init__(self, in_channels, out_channels, stride, expansion, remove_exp_conv): super(LinearBottleneck, self).__init__() self.residual = (in_channels == out_channels) and (stride == 1) mid_channels = in_channels * 6 if expansion else in_channels self.use_exp_conv = (expansion or (not remove_exp_conv)) if self.use_exp_conv: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation="relu6") self.conv2 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation="relu6") self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.residual: x = x + identity return x class MobileNetV2(nn.Module): """ MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. remove_exp_conv : bool Whether to remove expansion convolution. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, remove_exp_conv, in_channels=3, in_size=(224, 224), num_classes=1000): super(MobileNetV2, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, stride=2, activation="relu6")) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 expansion = (i != 0) or (j != 0) stage.add_module("unit{}".format(j + 1), LinearBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion, remove_exp_conv=remove_exp_conv)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, activation="relu6")) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = conv1x1( in_channels=in_channels, out_channels=num_classes, bias=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_mobilenetv2(width_scale, remove_exp_conv=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MobileNetV2 model with specific parameters. Parameters: ---------- width_scale : float Scale factor for width of layers. remove_exp_conv : bool, default False Whether to remove expansion convolution. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 32 final_block_channels = 1280 layers = [1, 2, 3, 4, 3, 3, 1] downsample = [0, 1, 1, 1, 0, 1, 0] channels_per_layers = [16, 24, 32, 64, 96, 160, 320] from functools import reduce channels = reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(channels_per_layers, layers, downsample), [[]]) if width_scale != 1.0: channels = [[int(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = int(init_block_channels * width_scale) if width_scale > 1.0: final_block_channels = int(final_block_channels * width_scale) net = MobileNetV2( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, remove_exp_conv=remove_exp_conv, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mobilenetv2_w1(**kwargs): """ 1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=1.0, model_name="mobilenetv2_w1", **kwargs) def mobilenetv2_w3d4(**kwargs): """ 0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.75, model_name="mobilenetv2_w3d4", **kwargs) def mobilenetv2_wd2(**kwargs): """ 0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.5, model_name="mobilenetv2_wd2", **kwargs) def mobilenetv2_wd4(**kwargs): """ 0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.25, model_name="mobilenetv2_wd4", **kwargs) def mobilenetv2b_w1(**kwargs): """ 1.0 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=1.0, remove_exp_conv=True, model_name="mobilenetv2b_w1", **kwargs) def mobilenetv2b_w3d4(**kwargs): """ 0.75 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.75, remove_exp_conv=True, model_name="mobilenetv2b_w3d4", **kwargs) def mobilenetv2b_wd2(**kwargs): """ 0.5 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenetv2(width_scale=0.5, remove_exp_conv=True, model_name="mobilenetv2b_wd2", **kwargs) def mobilenetv2b_wd4(**kwargs): """ 0.25 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_mobilenetv2(width_scale=0.25, remove_exp_conv=True, model_name="mobilenetv2b_wd4", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mobilenetv2_w1, mobilenetv2_w3d4, mobilenetv2_wd2, mobilenetv2_wd4, mobilenetv2b_w1, mobilenetv2b_w3d4, mobilenetv2b_wd2, mobilenetv2b_wd4, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mobilenetv2_w1 or weight_count == 3504960) assert (model != mobilenetv2_w3d4 or weight_count == 2627592) assert (model != mobilenetv2_wd2 or weight_count == 1964736) assert (model != mobilenetv2_wd4 or weight_count == 1516392) assert (model != mobilenetv2b_w1 or weight_count == 3503872) assert (model != mobilenetv2b_w3d4 or weight_count == 2626968) assert (model != mobilenetv2b_wd2 or weight_count == 1964448) assert (model != mobilenetv2b_wd4 or weight_count == 1516312) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,761
32.321149
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/squeezenet.py
""" SqueezeNet for ImageNet-1K, implemented in PyTorch. Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. """ __all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1'] import os import torch import torch.nn as nn import torch.nn.init as init class FireConv(nn.Module): """ SqueezeNet specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. padding : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, padding): super(FireConv, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.activ(x) return x class FireUnit(nn.Module): """ SqueezeNet unit, so-called 'Fire' unit. Parameters: ---------- in_channels : int Number of input channels. squeeze_channels : int Number of output channels for squeeze convolution blocks. expand1x1_channels : int Number of output channels for expand 1x1 convolution blocks. expand3x3_channels : int Number of output channels for expand 3x3 convolution blocks. residual : bool Whether use residual connection. """ def __init__(self, in_channels, squeeze_channels, expand1x1_channels, expand3x3_channels, residual): super(FireUnit, self).__init__() self.residual = residual self.squeeze = FireConv( in_channels=in_channels, out_channels=squeeze_channels, kernel_size=1, padding=0) self.expand1x1 = FireConv( in_channels=squeeze_channels, out_channels=expand1x1_channels, kernel_size=1, padding=0) self.expand3x3 = FireConv( in_channels=squeeze_channels, out_channels=expand3x3_channels, kernel_size=3, padding=1) def forward(self, x): if self.residual: identity = x x = self.squeeze(x) y1 = self.expand1x1(x) y2 = self.expand3x3(x) out = torch.cat((y1, y2), dim=1) if self.residual: out = out + identity return out class SqueezeInitBlock(nn.Module): """ SqueezeNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. """ def __init__(self, in_channels, out_channels, kernel_size): super(SqueezeInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=2) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.activ(x) return x class SqueezeNet(nn.Module): """ SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- channels : list of list of int Number of output channels for each unit. residuals : bool Whether to use residual units. init_block_kernel_size : int or tuple/list of 2 int The dimensions of the convolution window for the initial unit. init_block_channels : int Number of output channels for the initial unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, residuals, init_block_kernel_size, init_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000): super(SqueezeNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", SqueezeInitBlock( in_channels=in_channels, out_channels=init_block_channels, kernel_size=init_block_kernel_size)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=3, stride=2, ceil_mode=True)) for j, out_channels in enumerate(channels_per_stage): expand_channels = out_channels // 2 squeeze_channels = out_channels // 8 stage.add_module("unit{}".format(j + 1), FireUnit( in_channels=in_channels, squeeze_channels=squeeze_channels, expand1x1_channels=expand_channels, expand3x3_channels=expand_channels, residual=((residuals is not None) and (residuals[i][j] == 1)))) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("dropout", nn.Dropout(p=0.5)) self.output = nn.Sequential() self.output.add_module("final_conv", nn.Conv2d( in_channels=in_channels, out_channels=num_classes, kernel_size=1)) self.output.add_module("final_activ", nn.ReLU(inplace=True)) self.output.add_module("final_pool", nn.AvgPool2d( kernel_size=13, stride=1)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): if 'final_conv' in name: init.normal_(module.weight, mean=0.0, std=0.01) else: init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_squeezenet(version, residual=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SqueezeNet model with specific parameters. Parameters: ---------- version : str Version of SqueezeNet ('1.0' or '1.1'). residual : bool, default False Whether to use residual connections. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == '1.0': channels = [[128, 128, 256], [256, 384, 384, 512], [512]] residuals = [[0, 1, 0], [1, 0, 1, 0], [1]] init_block_kernel_size = 7 init_block_channels = 96 elif version == '1.1': channels = [[128, 128], [256, 256], [384, 384, 512, 512]] residuals = [[0, 1], [0, 1], [0, 1, 0, 1]] init_block_kernel_size = 3 init_block_channels = 64 else: raise ValueError("Unsupported SqueezeNet version {}".format(version)) if not residual: residuals = None net = SqueezeNet( channels=channels, residuals=residuals, init_block_kernel_size=init_block_kernel_size, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def squeezenet_v1_0(**kwargs): """ SqueezeNet 'vanilla' model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=False, model_name="squeezenet_v1_0", **kwargs) def squeezenet_v1_1(**kwargs): """ SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.1", residual=False, model_name="squeezenet_v1_1", **kwargs) def squeezeresnet_v1_0(**kwargs): """ SqueezeNet model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.0", residual=True, model_name="squeezeresnet_v1_0", **kwargs) def squeezeresnet_v1_1(**kwargs): """ SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_squeezenet(version="1.1", residual=True, model_name="squeezeresnet_v1_1", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False models = [ squeezenet_v1_0, squeezenet_v1_1, squeezeresnet_v1_0, squeezeresnet_v1_1, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != squeezenet_v1_0 or weight_count == 1248424) assert (model != squeezenet_v1_1 or weight_count == 1235496) assert (model != squeezeresnet_v1_0 or weight_count == 1248424) assert (model != squeezeresnet_v1_1 or weight_count == 1235496) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,164
30.929134
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/nin_cifar.py
""" NIN for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Network In Network,' https://arxiv.org/abs/1312.4400. """ __all__ = ['CIFARNIN', 'nin_cifar10', 'nin_cifar100', 'nin_svhn'] import os import torch.nn as nn import torch.nn.init as init class NINConv(nn.Module): """ NIN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0): super(NINConv, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.activ(x) return x class CIFARNIN(nn.Module): """ NIN model for CIFAR from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- channels : list of list of int Number of output channels for each unit. first_kernel_sizes : list of int Convolution window sizes for the first units in each stage. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, first_kernel_sizes, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARNIN, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): if (j == 0) and (i != 0): if i == 1: stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=3, stride=2, padding=1)) else: stage.add_module("pool{}".format(i + 1), nn.AvgPool2d( kernel_size=3, stride=2, padding=1)) stage.add_module("dropout{}".format(i + 1), nn.Dropout(p=0.5)) kernel_size = first_kernel_sizes[i] if j == 0 else 1 padding = (kernel_size - 1) // 2 stage.add_module("unit{}".format(j + 1), NINConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.output = nn.Sequential() self.output.add_module("final_conv", NINConv( in_channels=in_channels, out_channels=num_classes, kernel_size=1)) self.output.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_nin_cifar(num_classes, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create NIN model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" channels = [[192, 160, 96], [192, 192, 192], [192, 192]] first_kernel_sizes = [5, 5, 3] net = CIFARNIN( channels=channels, first_kernel_sizes=first_kernel_sizes, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def nin_cifar10(num_classes=10, **kwargs): """ NIN model for CIFAR-10 from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nin_cifar(num_classes=num_classes, model_name="nin_cifar10", **kwargs) def nin_cifar100(num_classes=100, **kwargs): """ NIN model for CIFAR-100 from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nin_cifar(num_classes=num_classes, model_name="nin_cifar100", **kwargs) def nin_svhn(num_classes=10, **kwargs): """ NIN model for SVHN from 'Network In Network,' https://arxiv.org/abs/1312.4400. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_nin_cifar(num_classes=num_classes, model_name="nin_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (nin_cifar10, 10), (nin_cifar100, 100), (nin_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != nin_cifar10 or weight_count == 966986) assert (model != nin_cifar100 or weight_count == 984356) assert (model != nin_svhn or weight_count == 966986) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
8,048
29.957692
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/vgg.py
""" VGG for ImageNet-1K, implemented in PyTorch. Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. """ __all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b', 'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block class VGGDense(nn.Module): """ VGG specific dense block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(VGGDense, self).__init__() self.fc = nn.Linear( in_features=in_channels, out_features=out_channels) self.activ = nn.ReLU(inplace=True) self.dropout = nn.Dropout(p=0.5) def forward(self, x): x = self.fc(x) x = self.activ(x) x = self.dropout(x) return x class VGGOutputBlock(nn.Module): """ VGG specific output block. Parameters: ---------- in_channels : int Number of input channels. classes : int Number of classification classes. """ def __init__(self, in_channels, classes): super(VGGOutputBlock, self).__init__() mid_channels = 4096 self.fc1 = VGGDense( in_channels=in_channels, out_channels=mid_channels) self.fc2 = VGGDense( in_channels=mid_channels, out_channels=mid_channels) self.fc3 = nn.Linear( in_features=mid_channels, out_features=classes) def forward(self, x): x = self.fc1(x) x = self.fc2(x) x = self.fc3(x) return x class VGG(nn.Module): """ VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- channels : list of list of int Number of output channels for each unit. bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, bias=True, use_bn=False, in_channels=3, in_size=(224, 224), num_classes=1000): super(VGG, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), conv3x3_block( in_channels=in_channels, out_channels=out_channels, bias=bias, use_bn=use_bn)) in_channels = out_channels stage.add_module("pool{}".format(i + 1), nn.MaxPool2d( kernel_size=2, stride=2, padding=0)) self.features.add_module("stage{}".format(i + 1), stage) self.output = VGGOutputBlock( in_channels=(in_channels * 7 * 7), classes=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_vgg(blocks, bias=True, use_bn=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create VGG model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bias : bool, default True Whether the convolution layer uses a bias vector. use_bn : bool, default False Whether to use BatchNorm layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 11: layers = [1, 1, 2, 2, 2] elif blocks == 13: layers = [2, 2, 2, 2, 2] elif blocks == 16: layers = [2, 2, 3, 3, 3] elif blocks == 19: layers = [2, 2, 4, 4, 4] else: raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks)) channels_per_layers = [64, 128, 256, 512, 512] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = VGG( channels=channels, bias=bias, use_bn=use_bn, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def vgg11(**kwargs): """ VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, model_name="vgg11", **kwargs) def vgg13(**kwargs): """ VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, model_name="vgg13", **kwargs) def vgg16(**kwargs): """ VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, model_name="vgg16", **kwargs) def vgg19(**kwargs): """ VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, model_name="vgg19", **kwargs) def bn_vgg11(**kwargs): """ VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, bias=False, use_bn=True, model_name="bn_vgg11", **kwargs) def bn_vgg13(**kwargs): """ VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_vgg(blocks=13, bias=False, use_bn=True, model_name="bn_vgg13", **kwargs) def bn_vgg16(**kwargs): """ VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, bias=False, use_bn=True, model_name="bn_vgg16", **kwargs) def bn_vgg19(**kwargs): """ VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=19, bias=False, use_bn=True, model_name="bn_vgg19", **kwargs) def bn_vgg11b(**kwargs): """ VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=11, bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs) def bn_vgg13b(**kwargs): """ VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=13, bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs) def bn_vgg16b(**kwargs): """ VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_vgg(blocks=16, bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs) def bn_vgg19b(**kwargs): """ VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_vgg(blocks=19, bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ vgg11, vgg13, vgg16, vgg19, bn_vgg11, bn_vgg13, bn_vgg16, bn_vgg19, bn_vgg11b, bn_vgg13b, bn_vgg16b, bn_vgg19b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != vgg11 or weight_count == 132863336) assert (model != vgg13 or weight_count == 133047848) assert (model != vgg16 or weight_count == 138357544) assert (model != vgg19 or weight_count == 143667240) assert (model != bn_vgg11 or weight_count == 132866088) assert (model != bn_vgg13 or weight_count == 133050792) assert (model != bn_vgg16 or weight_count == 138361768) assert (model != bn_vgg19 or weight_count == 143672744) assert (model != bn_vgg11b or weight_count == 132868840) assert (model != bn_vgg13b or weight_count == 133053736) assert (model != bn_vgg16b or weight_count == 138365992) assert (model != bn_vgg19b or weight_count == 143678248) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
13,528
29.678005
117
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/resnet_cub.py
""" ResNet for CUB-200-2011, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub', 'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub', 'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub'] from .resnet import get_resnet def resnet10_cub(num_classes=200, **kwargs): """ ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=10, model_name="resnet10_cub", **kwargs) def resnet12_cub(num_classes=200, **kwargs): """ ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=12, model_name="resnet12_cub", **kwargs) def resnet14_cub(num_classes=200, **kwargs): """ ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=14, model_name="resnet14_cub", **kwargs) def resnetbc14b_cub(num_classes=200, **kwargs): """ ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b_cub", **kwargs) def resnet16_cub(num_classes=200, **kwargs): """ ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=16, model_name="resnet16_cub", **kwargs) def resnet18_cub(num_classes=200, **kwargs): """ ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=18, model_name="resnet18_cub", **kwargs) def resnet26_cub(num_classes=200, **kwargs): """ ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs) def resnetbc26b_cub(num_classes=200, **kwargs): """ ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b_cub", **kwargs) def resnet34_cub(num_classes=200, **kwargs): """ ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=34, model_name="resnet34_cub", **kwargs) def resnetbc38b_cub(num_classes=200, **kwargs): """ ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b_cub", **kwargs) def resnet50_cub(num_classes=200, **kwargs): """ ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=50, model_name="resnet50_cub", **kwargs) def resnet50b_cub(num_classes=200, **kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(num_classes=num_classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs) def resnet101_cub(num_classes=200, **kwargs): """ ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=101, model_name="resnet101_cub", **kwargs) def resnet101b_cub(num_classes=200, **kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs) def resnet152_cub(num_classes=200, **kwargs): """ ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=152, model_name="resnet152_cub", **kwargs) def resnet152b_cub(num_classes=200, **kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs) def resnet200_cub(num_classes=200, **kwargs): """ ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(num_classes=num_classes, blocks=200, model_name="resnet200_cub", **kwargs) def resnet200b_cub(num_classes=200, **kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(num_classes=num_classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resnet10_cub, resnet12_cub, resnet14_cub, resnetbc14b_cub, resnet16_cub, resnet18_cub, resnet26_cub, resnetbc26b_cub, resnet34_cub, resnetbc38b_cub, resnet50_cub, resnet50b_cub, resnet101_cub, resnet101b_cub, resnet152_cub, resnet152b_cub, resnet200_cub, resnet200b_cub, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10_cub or weight_count == 5008392) assert (model != resnet12_cub or weight_count == 5082376) assert (model != resnet14_cub or weight_count == 5377800) assert (model != resnetbc14b_cub or weight_count == 8425736) assert (model != resnet16_cub or weight_count == 6558472) assert (model != resnet18_cub or weight_count == 11279112) assert (model != resnet26_cub or weight_count == 17549832) assert (model != resnetbc26b_cub or weight_count == 14355976) assert (model != resnet34_cub or weight_count == 21387272) assert (model != resnetbc38b_cub or weight_count == 20286216) assert (model != resnet50_cub or weight_count == 23917832) assert (model != resnet50b_cub or weight_count == 23917832) assert (model != resnet101_cub or weight_count == 42909960) assert (model != resnet101b_cub or weight_count == 42909960) assert (model != resnet152_cub or weight_count == 58553608) assert (model != resnet152b_cub or weight_count == 58553608) assert (model != resnet200_cub or weight_count == 63034632) assert (model != resnet200b_cub or weight_count == 63034632) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 200)) if __name__ == "__main__": _test()
14,148
35.094388
117
py
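A minimal usage sketch for the CUB constructors above. The import path is an assumption based on the repo layout shown in this dump (pytorch/pytorchcv/models/), not something confirmed by this excerpt:

import torch
from pytorchcv.models.resnet_cub import resnet50b_cub  # assumed module path

# Build an untrained ResNet-50b head with the default 200 CUB-200-2011 classes.
net = resnet50b_cub(pretrained=False)
net.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)  # ImageNet-style input resolution
    y = net(x)
assert tuple(y.size()) == (1, 200)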
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/bagnet.py
""" BagNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. """ __all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock class BagNetBottleneck(nn.Module): """ BagNet bottleneck block for residual path in BagNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size of the second convolution. stride : int or tuple/list of 2 int Strides of the second convolution. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, kernel_size, stride, bottleneck_factor=4): super(BagNetBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = ConvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=0) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class BagNetUnit(nn.Module): """ BagNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size of the second body convolution. stride : int or tuple/list of 2 int Strides of the second body convolution. """ def __init__(self, in_channels, out_channels, kernel_size, stride): super(BagNetUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = BagNetBottleneck( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if x.size(-1) != identity.size(-1): diff = identity.size(-1) - x.size(-1) identity = identity[:, :, :-diff, :-diff] x = x + identity x = self.activ(x) return x class BagNetInitBlock(nn.Module): """ BagNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(BagNetInitBlock, self).__init__() self.conv1 = conv1x1( in_channels=in_channels, out_channels=out_channels) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, padding=0) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class BagNet(nn.Module): """ BagNet model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_pool_size : int Size of the pooling windows for final pool. normal_kernel_sizes : list of int Count of the first units with 3x3 convolution window size for each stage. 
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_pool_size,
                 normal_kernel_sizes,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(BagNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", BagNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if (j == 0) and (i != len(channels) - 1) else 1
                kernel_size = 3 if j < normal_kernel_sizes[i] else 1
                stage.add_module("unit{}".format(j + 1), BagNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    stride=stride))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=final_pool_size,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x


def get_bagnet(field,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create BagNet model with specific parameters.

    Parameters:
    ----------
    field : int
        Size of the receptive field.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    layers = [3, 4, 6, 3]

    if field == 9:
        normal_kernel_sizes = [1, 1, 0, 0]
        final_pool_size = 27
    elif field == 17:
        normal_kernel_sizes = [1, 1, 1, 0]
        final_pool_size = 26
    elif field == 33:
        normal_kernel_sizes = [1, 1, 1, 1]
        final_pool_size = 24
    else:
        raise ValueError("Unsupported BagNet with field: {}".format(field))

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = BagNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_pool_size=final_pool_size,
        normal_kernel_sizes=normal_kernel_sizes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net


def bagnet9(**kwargs):
    """
    BagNet-9 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
""" return get_bagnet(field=9, model_name="bagnet9", **kwargs) def bagnet17(**kwargs): """ BagNet-17 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_bagnet(field=17, model_name="bagnet17", **kwargs) def bagnet33(**kwargs): """ BagNet-33 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_bagnet(field=33, model_name="bagnet33", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ bagnet9, bagnet17, bagnet33, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != bagnet9 or weight_count == 15688744) assert (model != bagnet17 or weight_count == 16213032) assert (model != bagnet33 or weight_count == 18310184) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
10,903
29.373259
116
py
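The cropping in BagNetUnit.forward is easy to miss: the body path uses valid (padding=0) convolutions, so its output comes back spatially smaller than the identity, which is then trimmed from the bottom/right before the residual add. A toy reproduction of that logic with plain tensors, no BagNet modules required:

import torch

# The identity branch keeps the input's spatial size; the body branch shrinks it,
# e.g. a 3x3 convolution with padding=0 and stride=1 removes 2 pixels per axis.
identity = torch.randn(1, 8, 10, 10)
body_out = torch.randn(1, 8, 8, 8)

diff = identity.size(-1) - body_out.size(-1)
if diff > 0:
    identity = identity[:, :, :-diff, :-diff]  # same trim as BagNetUnit.forward

out = body_out + identity
assert tuple(out.shape) == (1, 8, 8, 8)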
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/airnet.py
""" AirNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. """ __all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock'] import os import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from .common import conv1x1_block, conv3x3_block class AirBlock(nn.Module): """ AirNet attention block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int, default 1 Number of groups. ratio: int, default 2 Air compression ratio. """ def __init__(self, in_channels, out_channels, groups=1, ratio=2): super(AirBlock, self).__init__() assert (out_channels % ratio == 0) mid_channels = out_channels // ratio self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, groups=groups) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) self.sigmoid = nn.Sigmoid() def forward(self, x): x = self.conv1(x) x = self.pool(x) x = self.conv2(x) x = F.interpolate( input=x, scale_factor=2, mode="bilinear", align_corners=True) x = self.conv3(x) x = self.sigmoid(x) return x class AirBottleneck(nn.Module): """ AirNet bottleneck block for residual path in AirNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. ratio: int Air compression ratio. """ def __init__(self, in_channels, out_channels, stride, ratio): super(AirBottleneck, self).__init__() mid_channels = out_channels // 4 self.use_air_block = (stride == 1 and mid_channels < 512) self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) if self.use_air_block: self.air = AirBlock( in_channels=in_channels, out_channels=mid_channels, ratio=ratio) def forward(self, x): if self.use_air_block: att = self.air(x) x = self.conv1(x) x = self.conv2(x) if self.use_air_block: x = x * att x = self.conv3(x) return x class AirUnit(nn.Module): """ AirNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. ratio: int Air compression ratio. """ def __init__(self, in_channels, out_channels, stride, ratio): super(AirUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = AirBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, ratio=ratio) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = x + identity x = self.activ(x) return x class AirInitBlock(nn.Module): """ AirNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. 
out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(AirInitBlock, self).__init__() mid_channels = out_channels // 2 self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels) self.conv3 = conv3x3_block( in_channels=mid_channels, out_channels=out_channels) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.pool(x) return x class AirNet(nn.Module): """ AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. ratio: int Air compression ratio. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, ratio, in_channels=3, in_size=(224, 224), num_classes=1000): super(AirNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", AirInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), AirUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, ratio=ratio)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_airnet(blocks, base_channels, ratio, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create AirNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. base_channels: int Base number of channels. ratio: int Air compression ratio. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks)) bottleneck_expansion = 4 init_block_channels = base_channels channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = AirNet( channels=channels, init_block_channels=init_block_channels, ratio=ratio, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def airnet50_1x64d_r2(**kwargs): """ AirNet50-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_airnet(blocks=50, base_channels=64, ratio=2, model_name="airnet50_1x64d_r2", **kwargs) def airnet50_1x64d_r16(**kwargs): """ AirNet50-1x64d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_airnet(blocks=50, base_channels=64, ratio=16, model_name="airnet50_1x64d_r16", **kwargs) def airnet101_1x64d_r2(**kwargs): """ AirNet101-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,' https://ieeexplore.ieee.org/document/8510896. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_airnet(blocks=101, base_channels=64, ratio=2, model_name="airnet101_1x64d_r2", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ airnet50_1x64d_r2, airnet50_1x64d_r16, airnet101_1x64d_r2, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != airnet50_1x64d_r2 or weight_count == 27425864) assert (model != airnet50_1x64d_r16 or weight_count == 25714952) assert (model != airnet101_1x64d_r2 or weight_count == 51727432) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
12,525
28.612293
115
py
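Since AirBlock is exported in __all__, it can be exercised on its own. The sketch below (the import path is assumed from the repo layout) shows the shape contract: the stride-2 max pool followed by the 2x bilinear upsample round-trips even input resolutions, and the sigmoid keeps the attention map in [0, 1]:

import torch
from pytorchcv.models.airnet import AirBlock  # assumed module path

blk = AirBlock(in_channels=64, out_channels=32, ratio=2)
blk.eval()

x = torch.randn(1, 64, 56, 56)
with torch.no_grad():
    att = blk(x)

# Pool (stride 2) then 2x upsampling restores even spatial sizes; channels follow
# the block's out_channels, which AirBottleneck sets to its mid_channels.
assert tuple(att.shape) == (1, 32, 56, 56)
assert 0.0 <= float(att.min()) and float(att.max()) <= 1.0  # sigmoid output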
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mnasnet.py
""" MnasNet for ImageNet-1K, implemented in PyTorch. Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. """ __all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small'] import os import torch.nn as nn import torch.nn.init as init from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock class DwsExpSEResUnit(nn.Module): """ Depthwise separable expanded residual unit with SE-block. Here it used as MnasNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the second convolution layer. use_kernel3 : bool, default True Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int, default 1 Expansion factor for each unit. se_factor : int, default 0 SE reduction factor for each unit. use_skip : bool, default True Whether to use skip connection. activation : str, default 'relu' Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride=1, use_kernel3=True, exp_factor=1, se_factor=0, use_skip=True, activation="relu"): super(DwsExpSEResUnit, self).__init__() assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (stride == 1) and use_skip self.use_exp_conv = exp_factor > 1 self.use_se = se_factor > 0 mid_channels = exp_factor * in_channels dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) self.dw_conv = dwconv_block_fn( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), round_mid=False, mid_activation=activation) self.pw_conv = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.dw_conv(x) if self.use_se: x = self.se(x) x = self.pw_conv(x) if self.residual: x = x + identity return x class MnasInitBlock(nn.Module): """ MnasNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. """ def __init__(self, in_channels, out_channels, mid_channels, use_skip): super(MnasInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=mid_channels, stride=2) self.conv2 = DwsExpSEResUnit( in_channels=mid_channels, out_channels=out_channels, use_skip=use_skip) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class MnasFinalBlock(nn.Module): """ MnasNet specific final block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. 
""" def __init__(self, in_channels, out_channels, mid_channels, use_skip): super(MnasFinalBlock, self).__init__() self.conv1 = DwsExpSEResUnit( in_channels=in_channels, out_channels=mid_channels, exp_factor=6, use_skip=use_skip) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class MnasNet(nn.Module): """ MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : list of 2 int Number of output channels for the initial unit. final_block_channels : list of 2 int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. se_factors : list of list of int SE reduction factor for each unit. init_block_use_skip : bool Whether to use skip connection in the initial unit. final_block_use_skip : bool Whether to use skip connection in the final block of the feature extractor. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, se_factors, init_block_use_skip, final_block_use_skip, in_channels=3, in_size=(224, 224), num_classes=1000): super(MnasNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", MnasInitBlock( in_channels=in_channels, out_channels=init_block_channels[1], mid_channels=init_block_channels[0], use_skip=init_block_use_skip)) in_channels = init_block_channels[1] for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] se_factor = se_factors[i][j] stage.add_module("unit{}".format(j + 1), DwsExpSEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, use_kernel3=use_kernel3, exp_factor=exp_factor, se_factor=se_factor)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", MnasFinalBlock( in_channels=in_channels, out_channels=final_block_channels[1], mid_channels=final_block_channels[0], use_skip=final_block_use_skip)) in_channels = final_block_channels[1] self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_mnasnet(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MnasNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('b1', 'a1' or 'small'). width_scale : float Scale factor for width of layers. 
model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "b1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]] kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]] exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]] se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]] init_block_use_skip = False final_block_use_skip = False elif version == "a1": init_block_channels = [32, 16] final_block_channels = [320, 1280] channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]] kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]] exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]] init_block_use_skip = False final_block_use_skip = True elif version == "small": init_block_channels = [8, 8] final_block_channels = [144, 1280] channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]] kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]] exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]] se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]] init_block_use_skip = True final_block_use_skip = True else: raise ValueError("Unsupported MnasNet version {}".format(version)) if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale) net = MnasNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, se_factors=se_factors, init_block_use_skip=init_block_use_skip, final_block_use_skip=final_block_use_skip, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mnasnet_b1(**kwargs): """ MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs) def mnasnet_a1(**kwargs): """ MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs) def mnasnet_small(**kwargs): """ MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mnasnet_b1, mnasnet_a1, mnasnet_small, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mnasnet_b1 or weight_count == 4383312) assert (model != mnasnet_a1 or weight_count == 3887038) assert (model != mnasnet_small or weight_count == 2030264) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
14,189
32.388235
118
py
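A sketch of the skip-connection rule in DwsExpSEResUnit: the identity is added only when the unit keeps both the channel count and the spatial resolution (and use_skip is set). DwsExpSEResUnit is not listed in the module's __all__, so the direct import below is an assumption about the file layout:

import torch
from pytorchcv.models.mnasnet import DwsExpSEResUnit  # assumed module path

# Residual case: same channels, stride 1, 5x5 depthwise kernel, SE enabled.
unit_res = DwsExpSEResUnit(in_channels=40, out_channels=40, stride=1,
                           use_kernel3=False, exp_factor=3, se_factor=4)
# Downsampling case: channels and resolution change, so no skip is possible.
unit_down = DwsExpSEResUnit(in_channels=40, out_channels=80, stride=2, exp_factor=6)
assert unit_res.residual and not unit_down.residual

unit_res.eval()
unit_down.eval()
x = torch.randn(1, 40, 28, 28)
with torch.no_grad():
    assert tuple(unit_res(x).shape) == (1, 40, 28, 28)   # identity added
    assert tuple(unit_down(x).shape) == (1, 80, 14, 14)  # plain feed-forward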
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/pyramidnet_cifar.py
""" PyramidNet for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. """ __all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn', 'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn', 'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn', 'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn', 'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn', 'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn', 'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .preresnet import PreResActivation from .pyramidnet import PyrUnit class CIFARPyramidNet(nn.Module): """ PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARPyramidNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, activation=None)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 stage.add_module("unit{}".format(j + 1), PyrUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_pyramidnet_cifar(num_classes, blocks, alpha, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PyramidNet for CIFAR model with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. alpha : int PyramidNet's alpha value. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. 
root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 init_block_channels = 16 growth_add = float(alpha) / float(sum(layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]], layers, [[init_block_channels]])[1:] channels = [[int(round(cij)) for cij in ci] for ci in channels] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARPyramidNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def pyramidnet110_a48_cifar10(num_classes=10, **kwargs): """ PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_cifar10", **kwargs) def pyramidnet110_a48_cifar100(num_classes=100, **kwargs): """ PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_cifar100", **kwargs) def pyramidnet110_a48_svhn(num_classes=10, **kwargs): """ PyramidNet-110 (a=48) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=48, bottleneck=False, model_name="pyramidnet110_a48_svhn", **kwargs) def pyramidnet110_a84_cifar10(num_classes=10, **kwargs): """ PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_cifar10", **kwargs) def pyramidnet110_a84_cifar100(num_classes=100, **kwargs): """ PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_cifar100", **kwargs) def pyramidnet110_a84_svhn(num_classes=10, **kwargs): """ PyramidNet-110 (a=84) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=84, bottleneck=False, model_name="pyramidnet110_a84_svhn", **kwargs) def pyramidnet110_a270_cifar10(num_classes=10, **kwargs): """ PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_cifar10", **kwargs) def pyramidnet110_a270_cifar100(num_classes=100, **kwargs): """ PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_cifar100", **kwargs) def pyramidnet110_a270_svhn(num_classes=10, **kwargs): """ PyramidNet-110 (a=270) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=110, alpha=270, bottleneck=False, model_name="pyramidnet110_a270_svhn", **kwargs) def pyramidnet164_a270_bn_cifar10(num_classes=10, **kwargs): """ PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( num_classes=num_classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_cifar10", **kwargs) def pyramidnet164_a270_bn_cifar100(num_classes=100, **kwargs): """ PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_cifar100", **kwargs) def pyramidnet164_a270_bn_svhn(num_classes=10, **kwargs): """ PyramidNet-164 (a=270, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=164, alpha=270, bottleneck=True, model_name="pyramidnet164_a270_bn_svhn", **kwargs) def pyramidnet200_a240_bn_cifar10(num_classes=10, **kwargs): """ PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_cifar10", **kwargs) def pyramidnet200_a240_bn_cifar100(num_classes=100, **kwargs): """ PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_cifar100", **kwargs) def pyramidnet200_a240_bn_svhn(num_classes=10, **kwargs): """ PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_svhn", **kwargs) def pyramidnet236_a220_bn_cifar10(num_classes=10, **kwargs): """ PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( num_classes=num_classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_cifar10", **kwargs) def pyramidnet236_a220_bn_cifar100(num_classes=100, **kwargs): """ PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_cifar100", **kwargs) def pyramidnet236_a220_bn_svhn(num_classes=10, **kwargs): """ PyramidNet-236 (a=220, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=236, alpha=220, bottleneck=True, model_name="pyramidnet236_a220_bn_svhn", **kwargs) def pyramidnet272_a200_bn_cifar10(num_classes=10, **kwargs): """ PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_cifar10", **kwargs) def pyramidnet272_a200_bn_cifar100(num_classes=100, **kwargs): """ PyramidNet-272 (a=200, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( num_classes=num_classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_cifar100", **kwargs) def pyramidnet272_a200_bn_svhn(num_classes=10, **kwargs): """ PyramidNet-272 (a=200, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_pyramidnet_cifar( num_classes=num_classes, blocks=272, alpha=200, bottleneck=True, model_name="pyramidnet272_a200_bn_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (pyramidnet110_a48_cifar10, 10), (pyramidnet110_a48_cifar100, 100), (pyramidnet110_a48_svhn, 10), (pyramidnet110_a84_cifar10, 10), (pyramidnet110_a84_cifar100, 100), (pyramidnet110_a84_svhn, 10), (pyramidnet110_a270_cifar10, 10), (pyramidnet110_a270_cifar100, 100), (pyramidnet110_a270_svhn, 10), (pyramidnet164_a270_bn_cifar10, 10), (pyramidnet164_a270_bn_cifar100, 100), (pyramidnet164_a270_bn_svhn, 10), (pyramidnet200_a240_bn_cifar10, 10), (pyramidnet200_a240_bn_cifar100, 100), (pyramidnet200_a240_bn_svhn, 10), (pyramidnet236_a220_bn_cifar10, 10), (pyramidnet236_a220_bn_cifar100, 100), (pyramidnet236_a220_bn_svhn, 10), (pyramidnet272_a200_bn_cifar10, 10), (pyramidnet272_a200_bn_cifar100, 100), (pyramidnet272_a200_bn_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained, num_classes=num_classes) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != pyramidnet110_a48_cifar10 or weight_count == 1772706) assert (model != pyramidnet110_a48_cifar100 or weight_count == 1778556) assert (model != pyramidnet110_a48_svhn or weight_count == 1772706) assert (model != pyramidnet110_a84_cifar10 or weight_count == 3904446) assert (model != pyramidnet110_a84_cifar100 or weight_count == 3913536) assert (model != pyramidnet110_a84_svhn or weight_count == 3904446) assert (model != pyramidnet110_a270_cifar10 or weight_count == 28485477) assert (model != pyramidnet110_a270_cifar100 or weight_count == 28511307) assert (model != pyramidnet110_a270_svhn or weight_count == 28485477) assert (model != pyramidnet164_a270_bn_cifar10 or weight_count == 27216021) assert (model != pyramidnet164_a270_bn_cifar100 or weight_count == 27319071) assert (model != pyramidnet164_a270_bn_svhn or weight_count == 27216021) assert (model != pyramidnet200_a240_bn_cifar10 or weight_count == 26752702) assert (model != pyramidnet200_a240_bn_cifar100 or weight_count == 26844952) assert (model != pyramidnet200_a240_bn_svhn or weight_count == 26752702) assert (model != pyramidnet236_a220_bn_cifar10 or weight_count == 26969046) assert (model != pyramidnet236_a220_bn_cifar100 or weight_count == 27054096) assert (model != pyramidnet236_a220_bn_svhn or weight_count == 26969046) assert (model != pyramidnet272_a200_bn_cifar10 or weight_count == 26210842) assert (model != pyramidnet272_a200_bn_cifar100 or weight_count == 26288692) assert (model != pyramidnet272_a200_bn_svhn or weight_count == 26210842) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
23,823
32.413745
120
py
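The additive widening schedule inside get_pyramidnet_cifar is worth seeing in isolation: every unit grows the width by alpha divided by the total unit count, starting from the init-block width. This standalone sketch re-runs the same reduce-based computation for PyramidNet-110 (alpha=48) and checks that the last unit ends at init_block_channels + alpha:

from functools import reduce


def pyramid_channels(blocks, alpha, bottleneck):
    # Mirrors the channel computation in get_pyramidnet_cifar above.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16
    growth_add = float(alpha) / float(sum(layers))
    channels = reduce(
        lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
        layers,
        [[init_block_channels]])[1:]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]
    return channels


chans = pyramid_channels(blocks=110, alpha=48, bottleneck=False)
assert len(chans) == 3 and all(len(ci) == 18 for ci in chans)  # 3 stages of (110-2)/6 units
assert chans[-1][-1] == 64  # last unit width = init (16) + alpha (48)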
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/preresnet_cifar.py
""" PreResNet for CIFAR/SVHN, implemented in PyTorch. Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. """ __all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn', 'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn', 'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn', 'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn', 'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn', 'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn', 'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn', 'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3 from .preresnet import PreResUnit, PreResActivation class CIFARPreResNet(nn.Module): """ PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), PreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_preresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PreResNet model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. bottleneck : bool Whether to use a bottleneck or simple block in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def preresnet20_cifar10(num_classes=10, **kwargs): """ PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar10", **kwargs) def preresnet20_cifar100(num_classes=100, **kwargs): """ PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar100", **kwargs) def preresnet20_svhn(num_classes=10, **kwargs): """ PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="preresnet20_svhn", **kwargs) def preresnet56_cifar10(num_classes=10, **kwargs): """ PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar10", **kwargs) def preresnet56_cifar100(num_classes=100, **kwargs): """ PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar100", **kwargs) def preresnet56_svhn(num_classes=10, **kwargs): """ PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="preresnet56_svhn", **kwargs) def preresnet110_cifar10(num_classes=10, **kwargs): """ PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar10", **kwargs) def preresnet110_cifar100(num_classes=100, **kwargs): """ PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar100", **kwargs) def preresnet110_svhn(num_classes=10, **kwargs): """ PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="preresnet110_svhn", **kwargs) def preresnet164bn_cifar10(num_classes=10, **kwargs): """ PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar10", **kwargs) def preresnet164bn_cifar100(num_classes=100, **kwargs): """ PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar100", **kwargs) def preresnet164bn_svhn(num_classes=10, **kwargs): """ PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="preresnet164bn_svhn", **kwargs) def preresnet272bn_cifar10(num_classes=10, **kwargs): """ PreResNet-272(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar10", **kwargs) def preresnet272bn_cifar100(num_classes=100, **kwargs): """ PreResNet-272(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar100", **kwargs) def preresnet272bn_svhn(num_classes=10, **kwargs): """ PreResNet-272(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="preresnet272bn_svhn", **kwargs) def preresnet542bn_cifar10(num_classes=10, **kwargs): """ PreResNet-542(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar10", **kwargs) def preresnet542bn_cifar100(num_classes=100, **kwargs): """ PreResNet-542(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar100", **kwargs) def preresnet542bn_svhn(num_classes=10, **kwargs): """ PreResNet-542(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="preresnet542bn_svhn", **kwargs) def preresnet1001_cifar10(num_classes=10, **kwargs): """ PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar10", **kwargs) def preresnet1001_cifar100(num_classes=100, **kwargs): """ PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar100", **kwargs) def preresnet1001_svhn(num_classes=10, **kwargs): """ PreResNet-1001 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name="preresnet1001_svhn", **kwargs) def preresnet1202_cifar10(num_classes=10, **kwargs): """ PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar10", **kwargs) def preresnet1202_cifar100(num_classes=100, **kwargs): """ PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_preresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar100", **kwargs) def preresnet1202_svhn(num_classes=10, **kwargs): """ PreResNet-1202 model for SVHN from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_preresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name="preresnet1202_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (preresnet20_cifar10, 10), (preresnet20_cifar100, 100), (preresnet20_svhn, 10), (preresnet56_cifar10, 10), (preresnet56_cifar100, 100), (preresnet56_svhn, 10), (preresnet110_cifar10, 10), (preresnet110_cifar100, 100), (preresnet110_svhn, 10), (preresnet164bn_cifar10, 10), (preresnet164bn_cifar100, 100), (preresnet164bn_svhn, 10), (preresnet272bn_cifar10, 10), (preresnet272bn_cifar100, 100), (preresnet272bn_svhn, 10), (preresnet542bn_cifar10, 10), (preresnet542bn_cifar100, 100), (preresnet542bn_svhn, 10), (preresnet1001_cifar10, 10), (preresnet1001_cifar100, 100), (preresnet1001_svhn, 10), (preresnet1202_cifar10, 10), (preresnet1202_cifar100, 100), (preresnet1202_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != preresnet20_cifar10 or weight_count == 272282) assert (model != preresnet20_cifar100 or weight_count == 278132) assert (model != preresnet20_svhn or weight_count == 272282) assert (model != preresnet56_cifar10 or weight_count == 855578) assert (model != preresnet56_cifar100 or weight_count == 861428) assert (model != preresnet56_svhn or weight_count == 855578) assert (model != preresnet110_cifar10 or weight_count == 1730522) assert (model != preresnet110_cifar100 or weight_count == 1736372) assert (model != preresnet110_svhn or weight_count == 1730522) assert (model != preresnet164bn_cifar10 or weight_count == 1703258) assert (model != preresnet164bn_cifar100 or weight_count == 1726388) assert (model != preresnet164bn_svhn or weight_count == 1703258) assert (model != preresnet272bn_cifar10 or weight_count == 2816090) assert (model != preresnet272bn_cifar100 or weight_count == 2839220) assert (model != preresnet272bn_svhn or weight_count == 2816090) assert (model != preresnet542bn_cifar10 or weight_count == 5598170) assert (model != preresnet542bn_cifar100 or weight_count == 5621300) assert (model != preresnet542bn_svhn or weight_count == 5598170) assert (model != preresnet1001_cifar10 or weight_count == 10327706) assert (model != preresnet1001_cifar100 or weight_count == 10350836) assert (model != preresnet1001_svhn or weight_count == 10327706) assert (model != preresnet1202_cifar10 or weight_count == 19423834) assert (model != preresnet1202_cifar100 or weight_count == 19429684) assert (model != preresnet1202_svhn or weight_count == 19423834) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
24,611
35.789238
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/alphapose_coco.py
""" AlphaPose for COCO Keypoint, implemented in PyTorch. Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. """ __all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco'] import os import torch import torch.nn as nn from .common import conv3x3, DucBlock, HeatmapMaxDetBlock from .fastseresnet import fastseresnet101b class AlphaPose(nn.Module): """ AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. channels : list of int Number of output channels for each decoder unit. return_heatmap : bool, default False Whether to return only heatmap. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (256, 192) Spatial size of the expected input image. keypoints : int, default 17 Number of keypoints. """ def __init__(self, backbone, backbone_out_channels, channels, return_heatmap=False, in_channels=3, in_size=(256, 192), keypoints=17): super(AlphaPose, self).__init__() assert (in_channels == 3) self.in_size = in_size self.keypoints = keypoints self.return_heatmap = return_heatmap self.backbone = backbone self.decoder = nn.Sequential() self.decoder.add_module("init_block", nn.PixelShuffle(upscale_factor=2)) in_channels = backbone_out_channels // 4 for i, out_channels in enumerate(channels): self.decoder.add_module("unit{}".format(i + 1), DucBlock( in_channels=in_channels, out_channels=out_channels, scale_factor=2)) in_channels = out_channels self.decoder.add_module("final_block", conv3x3( in_channels=in_channels, out_channels=keypoints, bias=True)) self.heatmap_max_det = HeatmapMaxDetBlock() self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): x = self.backbone(x) heatmap = self.decoder(x) if self.return_heatmap: return heatmap else: keypoints = self.heatmap_max_det(heatmap) return keypoints def get_alphapose(backbone, backbone_out_channels, keypoints, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create AlphaPose model with specific parameters. Parameters: ---------- backbone : nn.Sequential Feature extractor. backbone_out_channels : int Number of output channels for the backbone. keypoints : int Number of keypoints. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ channels = [256, 128] net = AlphaPose( backbone=backbone, backbone_out_channels=backbone_out_channels, channels=channels, keypoints=keypoints, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs): """ AlphaPose model on the base of ResNet-101b for COCO Keypoint from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137. 
Parameters: ---------- pretrained_backbone : bool, default False Whether to load the pretrained weights for feature extractor. keypoints : int, default 17 Number of keypoints. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ backbone = fastseresnet101b(pretrained=pretrained_backbone).features del backbone[-1] return get_alphapose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints, model_name="alphapose_fastseresnet101b_coco", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): in_size = (256, 192) keypoints = 17 return_heatmap = False pretrained = False models = [ alphapose_fastseresnet101b_coco, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != alphapose_fastseresnet101b_coco or weight_count == 59569873) batch = 14 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) assert ((y.shape[0] == batch) and (y.shape[1] == keypoints)) if return_heatmap: assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)) else: assert (y.shape[2] == 3) if __name__ == "__main__": _test()
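# A minimal sketch (added for illustration, not part of the original file) of
# the output geometry implied by the decoder above: assuming the truncated
# backbone reduces the input by a factor of 32, the decoder (PixelShuffle x2
# followed by two DUC blocks, each x2) upsamples by 8, so heatmaps have a
# quarter of the input resolution. The helper name is hypothetical.
def _sketch_heatmap_size(in_size, backbone_stride=32, decoder_upscale=8):
    return tuple((s // backbone_stride) * decoder_upscale for s in in_size)

# Example: a (256, 192) input yields (64, 48) heatmaps, one map per keypoint.
assert (_sketch_heatmap_size((256, 192)) == (64, 48))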
6,247
30.877551
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/pyramidnet.py
""" PyramidNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. """ __all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit'] import os import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from .common import pre_conv1x1_block, pre_conv3x3_block from .preresnet import PreResActivation class PyrBlock(nn.Module): """ Simple PyramidNet block for residual path in PyramidNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PyrBlock, self).__init__() self.conv1 = pre_conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False) self.conv2 = pre_conv3x3_block( in_channels=out_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class PyrBottleneck(nn.Module): """ PyramidNet bottleneck block for residual path in PyramidNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride): super(PyrBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activate=False) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride) self.conv3 = pre_conv1x1_block( in_channels=mid_channels, out_channels=out_channels) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class PyrUnit(nn.Module): """ PyramidNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. """ def __init__(self, in_channels, out_channels, stride, bottleneck): super(PyrUnit, self).__init__() assert (out_channels >= in_channels) self.resize_identity = (stride != 1) self.identity_pad_width = (0, 0, 0, 0, 0, out_channels - in_channels) if bottleneck: self.body = PyrBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride) else: self.body = PyrBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.bn = nn.BatchNorm2d(num_features=out_channels) if self.resize_identity: self.identity_pool = nn.AvgPool2d( kernel_size=2, stride=stride, ceil_mode=True) def forward(self, x): identity = x x = self.body(x) x = self.bn(x) if self.resize_identity: identity = self.identity_pool(identity) identity = F.pad(identity, pad=self.identity_pad_width) x = x + identity return x class PyrInitBlock(nn.Module): """ PyramidNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. 
""" def __init__(self, in_channels, out_channels): super(PyrInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, padding=3, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) self.activ = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activ(x) x = self.pool(x) return x class PyramidNet(nn.Module): """ PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(224, 224), num_classes=1000): super(PyramidNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PyrInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 1 if (i == 0) or (j != 0) else 2 stage.add_module("unit{}".format(j + 1), PyrUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_pyramidnet(blocks, alpha, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create PyramidNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. alpha : int PyramidNet's alpha value. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14: layers = [2, 2, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) init_block_channels = 64 growth_add = float(alpha) / float(sum(layers)) from functools import reduce channels = reduce( lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]], layers, [[init_block_channels]])[1:] channels = [[int(round(cij)) for cij in ci] for ci in channels] if blocks < 50: bottleneck = False else: bottleneck = True channels = [[cij * 4 for cij in ci] for ci in channels] net = PyramidNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def pyramidnet101_a360(**kwargs): """ PyramidNet-101 model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_pyramidnet(blocks=101, alpha=360, model_name="pyramidnet101_a360", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ pyramidnet101_a360, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != pyramidnet101_a360 or weight_count == 42455070) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
11,038
28.126649
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/seresnet.py
""" SE-ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26', 'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b', 'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, SEBlock from .resnet import ResBlock, ResBottleneck, ResInitBlock class SEResUnit(nn.Module): """ SE-ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride): super(SEResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEResNet(nn.Module): """ SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(SEResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_seresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SE-ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnet10(**kwargs): """ SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=10, model_name="seresnet10", **kwargs) def seresnet12(**kwargs): """ SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=12, model_name="seresnet12", **kwargs) def seresnet14(**kwargs): """ SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=14, model_name="seresnet14", **kwargs) def seresnet16(**kwargs): """ SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=16, model_name="seresnet16", **kwargs) def seresnet18(**kwargs): """ SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=18, model_name="seresnet18", **kwargs) def seresnet26(**kwargs): """ SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs) def seresnetbc26b(**kwargs): """ SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs) def seresnet34(**kwargs): """ SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=34, model_name="seresnet34", **kwargs) def seresnetbc38b(**kwargs): """ SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs) def seresnet50(**kwargs): """ SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=50, model_name="seresnet50", **kwargs) def seresnet50b(**kwargs): """ SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs) def seresnet101(**kwargs): """ SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=101, model_name="seresnet101", **kwargs) def seresnet101b(**kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs) def seresnet152(**kwargs): """ SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, model_name="seresnet152", **kwargs) def seresnet152b(**kwargs): """ SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs) def seresnet200(**kwargs): """ SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=200, model_name="seresnet200", **kwargs) def seresnet200b(**kwargs): """ SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ seresnet10, seresnet12, seresnet14, seresnet16, seresnet18, seresnet26, seresnetbc26b, seresnet34, seresnetbc38b, seresnet50, seresnet50b, seresnet101, seresnet101b, seresnet152, seresnet152b, seresnet200, seresnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet10 or weight_count == 5463332) assert (model != seresnet12 or weight_count == 5537896) assert (model != seresnet14 or weight_count == 5835504) assert (model != seresnet16 or weight_count == 7024640) assert (model != seresnet18 or weight_count == 11778592) assert (model != seresnet26 or weight_count == 18093852) assert (model != seresnetbc26b or weight_count == 17395976) assert (model != seresnet34 or weight_count == 21958868) assert (model != seresnetbc38b or weight_count == 24026616) assert (model != seresnet50 or weight_count == 28088024) assert (model != seresnet50b or weight_count == 28088024) assert (model != seresnet101 or weight_count == 49326872) assert (model != seresnet101b or weight_count == 49326872) assert (model != seresnet152 or weight_count == 66821848) assert (model != seresnet152b or weight_count == 66821848) assert (model != seresnet200 or weight_count == 71835864) assert (model != seresnet200b or weight_count == 71835864) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert 
(tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
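# A minimal sketch (added for illustration, not part of the original file) of
# the depth bookkeeping asserted in `get_seresnet` above: counting the stem
# convolution and the classifier as 2 layers, a basic unit contributes 2
# convolutions and a bottleneck unit contributes 3. The helper name is
# hypothetical.
def _sketch_seresnet_depth(layers, bottleneck):
    return sum(layers) * (3 if bottleneck else 2) + 2

# Example: SE-ResNet-50 stacks [3, 4, 6, 3] bottleneck units: 16 * 3 + 2 = 50.
assert (_sketch_seresnet_depth([3, 4, 6, 3], bottleneck=True) == 50)
assert (_sketch_seresnet_depth([2, 2, 2, 2], bottleneck=False) == 18)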
18,211
31.579606
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/seresnet_cub.py
""" SE-ResNet for CUB-200-2011, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub', 'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub', 'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub', 'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub'] from .seresnet import get_seresnet def seresnet10_cub(num_classes=200, **kwargs): """ SE-ResNet-10 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=10, model_name="seresnet10_cub", **kwargs) def seresnet12_cub(num_classes=200, **kwargs): """ SE-ResNet-12 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=12, model_name="seresnet12_cub", **kwargs) def seresnet14_cub(num_classes=200, **kwargs): """ SE-ResNet-14 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=14, model_name="seresnet14_cub", **kwargs) def seresnetbc14b_cub(num_classes=200, **kwargs): """ SE-ResNet-BC-14b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="seresnetbc14b_cub", **kwargs) def seresnet16_cub(num_classes=200, **kwargs): """ SE-ResNet-16 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=16, model_name="seresnet16_cub", **kwargs) def seresnet18_cub(num_classes=200, **kwargs): """ SE-ResNet-18 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. 
Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=18, model_name="seresnet18_cub", **kwargs) def seresnet26_cub(num_classes=200, **kwargs): """ SE-ResNet-26 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=26, bottleneck=False, model_name="seresnet26_cub", **kwargs) def seresnetbc26b_cub(num_classes=200, **kwargs): """ SE-ResNet-BC-26b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b_cub", **kwargs) def seresnet34_cub(num_classes=200, **kwargs): """ SE-ResNet-34 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=34, model_name="seresnet34_cub", **kwargs) def seresnetbc38b_cub(num_classes=200, **kwargs): """ SE-ResNet-BC-38b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model (bottleneck compressed). Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b_cub", **kwargs) def seresnet50_cub(num_classes=200, **kwargs): """ SE-ResNet-50 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=50, model_name="seresnet50_cub", **kwargs) def seresnet50b_cub(num_classes=200, **kwargs): """ SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=50, conv1_stride=False, model_name="seresnet50b_cub", **kwargs) def seresnet101_cub(num_classes=200, **kwargs): """ SE-ResNet-101 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=101, model_name="seresnet101_cub", **kwargs) def seresnet101b_cub(num_classes=200, **kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=101, conv1_stride=False, model_name="seresnet101b_cub", **kwargs) def seresnet152_cub(num_classes=200, **kwargs): """ SE-ResNet-152 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=152, model_name="seresnet152_cub", **kwargs) def seresnet152b_cub(num_classes=200, **kwargs): """ SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=152, conv1_stride=False, model_name="seresnet152b_cub", **kwargs) def seresnet200_cub(num_classes=200, **kwargs): """ SE-ResNet-200 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnet(num_classes=num_classes, blocks=200, model_name="seresnet200_cub", **kwargs) def seresnet200b_cub(num_classes=200, **kwargs): """ SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- num_classes : int, default 200 Number of classification num_classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnet(num_classes=num_classes, blocks=200, conv1_stride=False, model_name="seresnet200b_cub", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ seresnet10_cub, seresnet12_cub, seresnet14_cub, seresnetbc14b_cub, seresnet16_cub, seresnet18_cub, seresnet26_cub, seresnetbc26b_cub, seresnet34_cub, seresnetbc38b_cub, seresnet50_cub, seresnet50b_cub, seresnet101_cub, seresnet101b_cub, seresnet152_cub, seresnet152b_cub, seresnet200_cub, seresnet200b_cub, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnet10_cub or weight_count == 5052932) assert (model != seresnet12_cub or weight_count == 5127496) assert (model != seresnet14_cub or weight_count == 5425104) assert (model != seresnetbc14b_cub or weight_count == 9126136) assert (model != seresnet16_cub or weight_count == 6614240) assert (model != seresnet18_cub or weight_count == 11368192) assert (model != seresnet26_cub or weight_count == 17683452) assert (model != seresnetbc26b_cub or weight_count == 15756776) assert (model != seresnet34_cub or weight_count == 21548468) assert (model != seresnetbc38b_cub or weight_count == 22387416) assert (model != seresnet50_cub or weight_count == 26448824) assert (model != seresnet50b_cub or weight_count == 26448824) assert (model != seresnet101_cub or weight_count == 47687672) assert (model != seresnet101b_cub or weight_count == 47687672) assert (model != seresnet152_cub or weight_count == 65182648) assert (model != seresnet152b_cub or weight_count == 65182648) assert (model != seresnet200_cub or weight_count == 70196664) assert (model != seresnet200b_cub or weight_count == 70196664) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 200)) if __name__ == "__main__": _test()
14,391
35.808184
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/densenet.py
""" DenseNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. """ __all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import pre_conv1x1_block, pre_conv3x3_block from .preresnet import PreResInitBlock, PreResActivation class DenseUnit(nn.Module): """ DenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dropout_rate : float Parameter of Dropout layer. Faction of the input units to drop. """ def __init__(self, in_channels, out_channels, dropout_rate): super(DenseUnit, self).__init__() self.use_dropout = (dropout_rate != 0.0) bn_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bn_size self.conv1 = pre_conv1x1_block( in_channels=in_channels, out_channels=mid_channels) self.conv2 = pre_conv3x3_block( in_channels=mid_channels, out_channels=inc_channels) if self.use_dropout: self.dropout = nn.Dropout(p=dropout_rate) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) if self.use_dropout: x = self.dropout(x) x = torch.cat((identity, x), dim=1) return x class TransitionBlock(nn.Module): """ DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the first unit of each stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(TransitionBlock, self).__init__() self.conv = pre_conv1x1_block( in_channels=in_channels, out_channels=out_channels) self.pool = nn.AvgPool2d( kernel_size=2, stride=2, padding=0) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class DenseNet(nn.Module): """ DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dropout_rate : float, default 0.0 Parameter of Dropout layer. Faction of the input units to drop. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, dropout_rate=0.0, in_channels=3, in_size=(224, 224), num_classes=1000): super(DenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", PreResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock( in_channels=in_channels, out_channels=(in_channels // 2))) in_channels = in_channels // 2 for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), DenseUnit( in_channels=in_channels, out_channels=out_channels, dropout_rate=dropout_rate)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_densenet(blocks, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DenseNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 121: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 24, 16] elif blocks == 161: init_block_channels = 96 growth_rate = 48 layers = [6, 12, 36, 24] elif blocks == 169: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 32, 32] elif blocks == 201: init_block_channels = 64 growth_rate = 32 layers = [6, 12, 48, 32] else: raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks)) from functools import reduce channels = reduce( lambda xi, yi: xi + [reduce( lambda xj, yj: xj + [xj[-1] + yj], [growth_rate] * yi, [xi[-1][-1] // 2])[1:]], layers, [[init_block_channels * 2]])[1:] net = DenseNet( channels=channels, init_block_channels=init_block_channels, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def densenet121(**kwargs): """ DenseNet-121 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet(blocks=121, model_name="densenet121", **kwargs) def densenet161(**kwargs): """ DenseNet-161 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. 
Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet(blocks=161, model_name="densenet161", **kwargs) def densenet169(**kwargs): """ DenseNet-169 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet(blocks=169, model_name="densenet169", **kwargs) def densenet201(**kwargs): """ DenseNet-201 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_densenet(blocks=201, model_name="densenet201", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ densenet121, densenet161, densenet169, densenet201, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != densenet121 or weight_count == 7978856) assert (model != densenet161 or weight_count == 28681000) assert (model != densenet169 or weight_count == 14149480) assert (model != densenet201 or weight_count == 20013928) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
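The nested functools.reduce in get_densenet packs the whole DenseNet channel schedule into one expression. As a reading aid, here is an equivalent explicit loop (a sketch, not part of the original file; the variable names are ours) that reproduces the same list for DenseNet-121:

growth_rate = 32
init_block_channels = 64
layers = [6, 12, 24, 16]  # DenseNet-121

channels = []
prev_out = init_block_channels * 2  # seed is doubled so the uniform halving below starts stage 1 at 64
for li in layers:
    c = prev_out // 2  # TransitionBlock halves the width before each stage
    stage = []
    for _ in range(li):
        c += growth_rate  # every DenseUnit concatenates growth_rate new channels
        stage.append(c)
    channels.append(stage)
    prev_out = stage[-1]

print(channels[0][:3])   # [96, 128, 160]
print(channels[-1][-1])  # 1024, the width entering the classifier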
9,930
29.556923
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/seresnext.py
""" SE-ResNeXt for ImageNet-1K, implemented in PyTorch. Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. """ __all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, SEBlock from .resnet import ResInitBlock from .resnext import ResNeXtBottleneck class SEResNeXtUnit(nn.Module): """ SE-ResNeXt unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. """ def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width): super(SEResNeXtUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) self.body = ResNeXtBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEResNeXt(nn.Module): """ SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), num_classes=1000): super(SEResNeXt, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEResNeXtUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, cardinality=cardinality, bottleneck_width=bottleneck_width)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_seresnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create SE-ResNeXt model with specific parameters. Parameters: ---------- blocks : int Number of blocks. cardinality: int Number of groups. bottleneck_width: int Width of bottleneck block. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] else: raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks)) init_block_channels = 64 channels_per_layers = [256, 512, 1024, 2048] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEResNeXt( channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnext50_32x4d(**kwargs): """ SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d", **kwargs) def seresnext101_32x4d(**kwargs): """ SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_seresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d", **kwargs) def seresnext101_64x4d(**kwargs): """ SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_seresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ seresnext50_32x4d, seresnext101_32x4d, seresnext101_64x4d, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != seresnext50_32x4d or weight_count == 27559896) assert (model != seresnext101_32x4d or weight_count == 48955416) assert (model != seresnext101_64x4d or weight_count == 88232984) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
8,721
29.929078
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/darts.py
""" DARTS for ImageNet-1K, implemented in PyTorch. Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. """ __all__ = ['DARTS', 'darts'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import conv1x1, Identity from .nasnet import nasnet_dual_path_sequential class DwsConv(nn.Module): """ Standard dilated depthwise separable convolution block with. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int Dilation value for convolution layer. bias : bool, default False Whether the layers use a bias vector. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False): super(DwsConv, self).__init__() self.dw_conv = nn.Conv2d( in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias) self.pw_conv = conv1x1( in_channels=in_channels, out_channels=out_channels, bias=bias) def forward(self, x): x = self.dw_conv(x) x = self.pw_conv(x) return x class DartsConv(nn.Module): """ DARTS specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. activate : bool, default True Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activate=True): super(DartsConv, self).__init__() self.activate = activate if self.activate: self.activ = nn.ReLU(inplace=False) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) def forward(self, x): if self.activate: x = self.activ(x) x = self.conv(x) x = self.bn(x) return x def darts_conv1x1(in_channels, out_channels, activate=True): """ 1x1 version of the DARTS specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool, default True Whether activate the convolution block. """ return DartsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, activate=activate) def darts_conv3x3_s2(in_channels, out_channels, activate=True): """ 3x3 version of the DARTS specific convolution block with stride 2. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. activate : bool, default True Whether activate the convolution block. """ return DartsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, activate=activate) class DartsDwsConv(nn.Module): """ DARTS specific dilated convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. 
padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int Dilation value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation): super(DartsDwsConv, self).__init__() self.activ = nn.ReLU(inplace=False) self.conv = DwsConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) def forward(self, x): x = self.activ(x) x = self.conv(x) x = self.bn(x) return x class DartsDwsBranch(nn.Module): """ DARTS specific block with depthwise separable convolution layers. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding): super(DartsDwsBranch, self).__init__() mid_channels = in_channels self.conv1 = DartsDwsConv( in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=1) self.conv2 = DartsDwsConv( in_channels=mid_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=padding, dilation=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class DartsReduceBranch(nn.Module): """ DARTS specific factorized reduce block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 2 Strides of the convolution. """ def __init__(self, in_channels, out_channels, stride=2): super(DartsReduceBranch, self).__init__() assert (out_channels % 2 == 0) mid_channels = out_channels // 2 self.activ = nn.ReLU(inplace=False) self.conv1 = conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.conv2 = conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=stride) self.bn = nn.BatchNorm2d(num_features=out_channels) def forward(self, x): x = self.activ(x) x1 = self.conv1(x) x = x[:, :, 1:, 1:].contiguous() x2 = self.conv2(x) x = torch.cat((x1, x2), dim=1) x = self.bn(x) return x class Stem1Unit(nn.Module): """ DARTS Stem1 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(Stem1Unit, self).__init__() mid_channels = out_channels // 2 self.conv1 = darts_conv3x3_s2( in_channels=in_channels, out_channels=mid_channels, activate=False) self.conv2 = darts_conv3x3_s2( in_channels=mid_channels, out_channels=out_channels, activate=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def stem2_unit(in_channels, out_channels): """ DARTS Stem2 unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return darts_conv3x3_s2( in_channels=in_channels, out_channels=out_channels, activate=True) def darts_maxpool3x3(channels, stride): """ DARTS specific 3x3 Max pooling layer. Parameters: ---------- channels : int Number of input/output channels. Unused parameter. stride : int or tuple/list of 2 int Strides of the convolution. 
""" assert (channels > 0) return nn.MaxPool2d( kernel_size=3, stride=stride, padding=1) def darts_skip_connection(channels, stride): """ DARTS specific skip connection layer. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ assert (channels > 0) if stride == 1: return Identity() else: assert (stride == 2) return DartsReduceBranch( in_channels=channels, out_channels=channels, stride=stride) def darts_dws_conv3x3(channels, stride): """ 3x3 version of DARTS specific dilated convolution block. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ return DartsDwsConv( in_channels=channels, out_channels=channels, kernel_size=3, stride=stride, padding=2, dilation=2) def darts_dws_branch3x3(channels, stride): """ 3x3 version of DARTS specific dilated convolution branch. Parameters: ---------- channels : int Number of input/output channels. stride : int or tuple/list of 2 int Strides of the convolution. """ return DartsDwsBranch( in_channels=channels, out_channels=channels, kernel_size=3, stride=stride, padding=1) # Set of operations in genotype. GENOTYPE_OPS = { 'max_pool_3x3': darts_maxpool3x3, 'skip_connect': darts_skip_connection, 'dil_conv_3x3': darts_dws_conv3x3, 'sep_conv_3x3': darts_dws_branch3x3, } class DartsMainBlock(nn.Module): """ DARTS main block, described by genotype. Parameters: ---------- genotype : list of tuples (str, int) List of genotype elements (operations and linked indices). channels : int Number of input/output channels. reduction : bool Whether use reduction. """ def __init__(self, genotype, channels, reduction): super(DartsMainBlock, self).__init__() self.concat = [2, 3, 4, 5] op_names, indices = zip(*genotype) self.indices = indices self.steps = len(op_names) // 2 self.ops = nn.ModuleList() for name, index in zip(op_names, indices): stride = 2 if reduction and index < 2 else 1 op = GENOTYPE_OPS[name](channels, stride) self.ops += [op] def forward(self, x, x_prev): s0 = x_prev s1 = x states = [s0, s1] for i in range(self.steps): j1 = 2 * i j2 = 2 * i + 1 op1 = self.ops[j1] op2 = self.ops[j2] y1 = states[self.indices[j1]] y2 = states[self.indices[j2]] y1 = op1(y1) y2 = op2(y2) s = y1 + y2 states += [s] x_out = torch.cat([states[i] for i in self.concat], dim=1) return x_out class DartsUnit(nn.Module): """ DARTS unit. Parameters: ---------- in_channels : int Number of input channels. prev_in_channels : int Number of input channels in previous input. out_channels : int Number of output channels. genotype : list of tuples (str, int) List of genotype elements (operations and linked indices). reduction : bool Whether use reduction. prev_reduction : bool Whether use previous reduction. 
""" def __init__(self, in_channels, prev_in_channels, out_channels, genotype, reduction, prev_reduction): super(DartsUnit, self).__init__() mid_channels = out_channels // 4 if prev_reduction: self.preprocess_prev = DartsReduceBranch( in_channels=prev_in_channels, out_channels=mid_channels) else: self.preprocess_prev = darts_conv1x1( in_channels=prev_in_channels, out_channels=mid_channels) self.preprocess = darts_conv1x1( in_channels=in_channels, out_channels=mid_channels) self.body = DartsMainBlock( genotype=genotype, channels=mid_channels, reduction=reduction) def forward(self, x, x_prev): x = self.preprocess(x) x_prev = self.preprocess_prev(x_prev) x_out = self.body(x, x_prev) return x_out class DARTS(nn.Module): """ DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. Parameters: ---------- channels : list of list of int Number of output channels for each unit. stem_blocks_channels : int Number of output channels for the Stem units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, stem_blocks_channels, normal_genotype, reduce_genotype, in_channels=3, in_size=(224, 224), num_classes=1000): super(DARTS, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nasnet_dual_path_sequential( return_two=False, first_ordinals=2, last_ordinals=1) self.features.add_module("stem1_unit", Stem1Unit( in_channels=in_channels, out_channels=stem_blocks_channels)) in_channels = stem_blocks_channels self.features.add_module("stem2_unit", stem2_unit( in_channels=in_channels, out_channels=stem_blocks_channels)) prev_in_channels = in_channels in_channels = stem_blocks_channels for i, channels_per_stage in enumerate(channels): stage = nasnet_dual_path_sequential() for j, out_channels in enumerate(channels_per_stage): reduction = (i != 0) and (j == 0) prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)) genotype = reduce_genotype if reduction else normal_genotype stage.add_module("unit{}".format(j + 1), DartsUnit( in_channels=in_channels, prev_in_channels=prev_in_channels, out_channels=out_channels, genotype=genotype, reduction=reduction, prev_reduction=prev_reduction)) prev_in_channels = in_channels in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_darts(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DARTS model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" stem_blocks_channels = 48 layers = [4, 5, 5] channels_per_layers = [192, 384, 768] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] normal_genotype = [ ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)] reduce_genotype = [ ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)] net = DARTS( channels=channels, stem_blocks_channels=stem_blocks_channels, normal_genotype=normal_genotype, reduce_genotype=reduce_genotype, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def darts(**kwargs): """ DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_darts(model_name="darts", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ darts, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != darts or weight_count == 4718752) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
20,291
26.683492
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/drn.py
""" DRN for ImageNet-1K, implemented in PyTorch. Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. """ __all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105'] import os import torch.nn as nn import torch.nn.init as init class DRNConv(nn.Module): """ DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int Dilation value for convolution layer. activate : bool Whether activate the convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, activate): super(DRNConv, self).__init__() self.activate = activate self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False) self.bn = nn.BatchNorm2d(num_features=out_channels) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def drn_conv1x1(in_channels, out_channels, stride, activate): """ 1x1 version of the DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. activate : bool Whether activate the convolution block. """ return DRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, dilation=1, activate=activate) def drn_conv3x3(in_channels, out_channels, stride, dilation, activate): """ 3x3 version of the DRN specific convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int Padding/dilation value for convolution layer. activate : bool Whether activate the convolution block. """ return DRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, activate=activate) class DRNBlock(nn.Module): """ Simple DRN block for residual path in DRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int Padding/dilation value for convolution layers. """ def __init__(self, in_channels, out_channels, stride, dilation): super(DRNBlock, self).__init__() self.conv1 = drn_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation, activate=True) self.conv2 = drn_conv3x3( in_channels=out_channels, out_channels=out_channels, stride=1, dilation=dilation, activate=False) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class DRNBottleneck(nn.Module): """ DRN bottleneck block for residual path in DRN unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int Padding/dilation value for 3x3 convolution layer. 
""" def __init__(self, in_channels, out_channels, stride, dilation): super(DRNBottleneck, self).__init__() mid_channels = out_channels // 4 self.conv1 = drn_conv1x1( in_channels=in_channels, out_channels=mid_channels, stride=1, activate=True) self.conv2 = drn_conv3x3( in_channels=mid_channels, out_channels=mid_channels, stride=stride, dilation=dilation, activate=True) self.conv3 = drn_conv1x1( in_channels=mid_channels, out_channels=out_channels, stride=1, activate=False) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class DRNUnit(nn.Module): """ DRN unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. dilation : int or tuple/list of 2 int Padding/dilation value for 3x3 convolution layers. bottleneck : bool Whether to use a bottleneck or simple block in units. simplified : bool Whether to use a simple or simplified block in units. residual : bool Whether do residual calculations. """ def __init__(self, in_channels, out_channels, stride, dilation, bottleneck, simplified, residual): super(DRNUnit, self).__init__() assert residual or (not bottleneck) assert (not (bottleneck and simplified)) assert (not (residual and simplified)) self.residual = residual self.resize_identity = ((in_channels != out_channels) or (stride != 1)) and self.residual and (not simplified) if bottleneck: self.body = DRNBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation) elif simplified: self.body = drn_conv3x3( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation, activate=False) else: self.body = DRNBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilation) if self.resize_identity: self.identity_conv = drn_conv1x1( in_channels=in_channels, out_channels=out_channels, stride=stride, activate=False) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) if self.residual: x = x + identity x = self.activ(x) return x def drn_init_block(in_channels, out_channels): """ DRN specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ return DRNConv( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=1, padding=3, dilation=1, activate=True) class DRN(nn.Module): """ DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. dilations : list of list of int Dilation values for 3x3 convolution layers for each unit. bottlenecks : list of list of int Whether to use a bottleneck or simple block in each unit. simplifieds : list of list of int Whether to use a simple or simplified block in each unit. residuals : list of list of int Whether to use residual block in each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, dilations, bottlenecks, simplifieds, residuals, in_channels=3, in_size=(224, 224), num_classes=1000): super(DRN, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", drn_init_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), DRNUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, dilation=dilations[i][j], bottleneck=(bottlenecks[i][j] == 1), simplified=(simplifieds[i][j] == 1), residual=(residuals[i][j] == 1))) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=28, stride=1)) self.output = nn.Conv2d( in_channels=in_channels, out_channels=num_classes, kernel_size=1) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_drn(blocks, simplified=False, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DRN-C or DRN-D model with specific parameters. Parameters: ---------- blocks : int Number of blocks. simplified : bool, default False Whether to use simplified scheme (D architecture). model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if blocks == 22: assert simplified layers = [1, 1, 2, 2, 2, 2, 1, 1] elif blocks == 26: layers = [1, 1, 2, 2, 2, 2, 1, 1] elif blocks == 38: assert simplified layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 42: layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 54: assert simplified layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 58: layers = [1, 1, 3, 4, 6, 3, 1, 1] elif blocks == 105: assert simplified layers = [1, 1, 3, 4, 23, 3, 1, 1] else: raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks)) if blocks < 50: channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512] bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0] else: channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512] bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0] if simplified: simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1] residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0] else: simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0] residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0] dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1] downsample = [0, 1, 1, 1, 0, 0, 0, 0] def expand(property_per_layers): from functools import reduce return reduce( lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]], zip(property_per_layers, layers, downsample), [[]]) channels = expand(channels_per_layers) dilations = expand(dilations_per_layers) bottlenecks = expand(bottlenecks_per_layers) residuals = expand(residuals_per_layers) simplifieds = expand(simplifieds_per_layers) init_block_channels = channels_per_layers[0] net = DRN( channels=channels, init_block_channels=init_block_channels, dilations=dilations, bottlenecks=bottlenecks, simplifieds=simplifieds, residuals=residuals, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def drnc26(**kwargs): """ DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=26, model_name="drnc26", **kwargs) def drnc42(**kwargs): """ DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=42, model_name="drnc42", **kwargs) def drnc58(**kwargs): """ DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=58, model_name="drnc58", **kwargs) def drnd22(**kwargs): """ DRN-D-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs) def drnd38(**kwargs): """ DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=38, simplified=True, model_name="drnd38", **kwargs) def drnd54(**kwargs): """ DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=54, simplified=True, model_name="drnd54", **kwargs) def drnd105(**kwargs): """ DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_drn(blocks=105, simplified=True, model_name="drnd105", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ drnc26, drnc42, drnc58, drnd22, drnd38, drnd54, drnd105, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != drnc26 or weight_count == 21126584) assert (model != drnc42 or weight_count == 31234744) assert (model != drnc58 or weight_count == 40542008) # 41591608 assert (model != drnd22 or weight_count == 16393752) assert (model != drnd38 or weight_count == 26501912) assert (model != drnd54 or weight_count == 35809176) assert (model != drnd105 or weight_count == 54801304) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
18,826
28.695584
118
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/mixnet.py
""" MixNet for ImageNet-1K, implemented in PyTorch. Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. """ __all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l'] import os import torch import torch.nn as nn import torch.nn.init as init from .common import round_channels, get_activation_layer, conv1x1_block, conv3x3_block, dwconv3x3_block, SEBlock class MixConv(nn.Module): """ Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. axis : int, default 1 The axis on which to concatenate the outputs. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, axis=1): super(MixConv, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] padding = padding if isinstance(padding, list) else [padding] kernel_count = len(kernel_size) self.splitted_in_channels = self.split_channels(in_channels, kernel_count) splitted_out_channels = self.split_channels(out_channels, kernel_count) for i, kernel_size_i in enumerate(kernel_size): in_channels_i = self.splitted_in_channels[i] out_channels_i = splitted_out_channels[i] padding_i = padding[i] self.add_module( name=str(i), module=nn.Conv2d( in_channels=in_channels_i, out_channels=out_channels_i, kernel_size=kernel_size_i, stride=stride, padding=padding_i, dilation=dilation, groups=(out_channels_i if out_channels == groups else groups), bias=bias)) self.axis = axis def forward(self, x): xx = torch.split(x, self.splitted_in_channels, dim=self.axis) out = [conv_i(x_i) for x_i, conv_i in zip(xx, self._modules.values())] x = torch.cat(tuple(out), dim=self.axis) return x @staticmethod def split_channels(channels, kernel_count): splitted_channels = [channels // kernel_count] * kernel_count splitted_channels[0] += channels - sum(splitted_channels) return splitted_channels class MixConvBlock(nn.Module): """ Mixed convolution block with Batch normalization and activation. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. activate : bool, default True Whether activate the convolution block. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(MixConvBlock, self).__init__() self.activate = (activation is not None) self.use_bn = use_bn self.conv = MixConv( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if self.use_bn: self.bn = nn.BatchNorm2d( num_features=out_channels, eps=bn_eps) if self.activate: self.activ = get_activation_layer(activation) def forward(self, x): x = self.conv(x) if self.use_bn: x = self.bn(x) if self.activate: x = self.activ(x) return x def mixconv1x1_block(in_channels, out_channels, kernel_count, stride=1, groups=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 1x1 version of the mixed convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_count : int Kernel count. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str, or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return MixConvBlock( in_channels=in_channels, out_channels=out_channels, kernel_size=([1] * kernel_count), stride=stride, padding=([0] * kernel_count), groups=groups, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class MixUnit(nn.Module): """ MixNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. exp_channels : int Number of middle (expanded) channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. exp_kernel_count : int Expansion convolution kernel count for each unit. conv1_kernel_count : int Conv1 kernel count for each unit. conv2_kernel_count : int Conv2 kernel count for each unit. exp_factor : int Expansion factor for each unit. se_factor : int SE reduction factor for each unit. activation : str Activation function or name of activation function. 
""" def __init__(self, in_channels, out_channels, stride, exp_kernel_count, conv1_kernel_count, conv2_kernel_count, exp_factor, se_factor, activation): super(MixUnit, self).__init__() assert (exp_factor >= 1) assert (se_factor >= 0) self.residual = (in_channels == out_channels) and (stride == 1) self.use_se = se_factor > 0 mid_channels = exp_factor * in_channels self.use_exp_conv = exp_factor > 1 if self.use_exp_conv: if exp_kernel_count == 1: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, activation=activation) else: self.exp_conv = mixconv1x1_block( in_channels=in_channels, out_channels=mid_channels, kernel_count=exp_kernel_count, activation=activation) if conv1_kernel_count == 1: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, activation=activation) else: self.conv1 = MixConvBlock( in_channels=mid_channels, out_channels=mid_channels, kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)], stride=stride, padding=[1 + i for i in range(conv1_kernel_count)], groups=mid_channels, activation=activation) if self.use_se: self.se = SEBlock( channels=mid_channels, reduction=(exp_factor * se_factor), round_mid=False, mid_activation=activation) if conv2_kernel_count == 1: self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) else: self.conv2 = mixconv1x1_block( in_channels=mid_channels, out_channels=out_channels, kernel_count=conv2_kernel_count, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) if self.use_se: x = self.se(x) x = self.conv2(x) if self.residual: x = x + identity return x class MixInitBlock(nn.Module): """ MixNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(MixInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.conv2 = MixUnit( in_channels=out_channels, out_channels=out_channels, stride=1, exp_kernel_count=1, conv1_kernel_count=1, conv2_kernel_count=1, exp_factor=1, se_factor=0, activation="relu") def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class MixNet(nn.Module): """ MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. exp_kernel_counts : list of list of int Expansion convolution kernel count for each unit. conv1_kernel_counts : list of list of int Conv1 kernel count for each unit. conv2_kernel_counts : list of list of int Conv2 kernel count for each unit. exp_factors : list of list of int Expansion factor for each unit. se_factors : list of list of int SE reduction factor for each unit. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, exp_kernel_counts, conv1_kernel_counts, conv2_kernel_counts, exp_factors, se_factors, in_channels=3, in_size=(224, 224), num_classes=1000): super(MixNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", MixInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if ((j == 0) and (i != 3)) or ((j == len(channels_per_stage) // 2) and (i == 3)) else 1 exp_kernel_count = exp_kernel_counts[i][j] conv1_kernel_count = conv1_kernel_counts[i][j] conv2_kernel_count = conv2_kernel_counts[i][j] exp_factor = exp_factors[i][j] se_factor = se_factors[i][j] activation = "relu" if i == 0 else "swish" stage.add_module("unit{}".format(j + 1), MixUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, exp_kernel_count=exp_kernel_count, conv1_kernel_count=conv1_kernel_count, conv2_kernel_count=conv2_kernel_count, exp_factor=exp_factor, se_factor=se_factor, activation=activation)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_mixnet(version, width_scale, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create MixNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('s' or 'm'). width_scale : float Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" if version == "s": init_block_channels = 16 channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]] exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]] conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]] conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]] exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]] se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]] elif version == "m": init_block_channels = 24 channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]] exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]] conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]] conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]] exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]] se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]] else: raise ValueError("Unsupported MixNet version {}".format(version)) final_block_channels = 1536 if width_scale != 1.0: channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels] init_block_channels = round_channels(init_block_channels * width_scale) net = MixNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, exp_kernel_counts=exp_kernel_counts, conv1_kernel_counts=conv1_kernel_counts, conv2_kernel_counts=conv2_kernel_counts, exp_factors=exp_factors, se_factors=se_factors, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def mixnet_s(**kwargs): """ MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mixnet(version="s", width_scale=1.0, model_name="mixnet_s", **kwargs) def mixnet_m(**kwargs): """ MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mixnet(version="m", width_scale=1.0, model_name="mixnet_m", **kwargs) def mixnet_l(**kwargs): """ MixNet-L model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_mixnet(version="m", width_scale=1.3, model_name="mixnet_l", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ mixnet_s, mixnet_m, mixnet_l, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != mixnet_s or weight_count == 4134606) assert (model != mixnet_m or weight_count == 5014382) assert (model != mixnet_l or weight_count == 7329252) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
20,528
33.386935
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/dabnet.py
""" DABNet for image segmentation, implemented in PyTorch. Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. """ __all__ = ['DABNet', 'dabnet_cityscapes'] import os import torch import torch.nn as nn from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\ DualPathSequential class DwaConvBlock(nn.Module): """ Depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. kernel_size : int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ def __init__(self, channels, kernel_size, stride, padding, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): super(DwaConvBlock, self).__init__() self.conv1 = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(kernel_size, 1), stride=stride, padding=(padding, 0), dilation=(dilation, 1), groups=channels, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) self.conv2 = ConvBlock( in_channels=channels, out_channels=channels, kernel_size=(1, kernel_size), stride=stride, padding=(0, padding), dilation=(1, dilation), groups=channels, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def dwa_conv3x3_block(channels, stride=1, padding=1, dilation=1, bias=False, use_bn=True, bn_eps=1e-5, activation=(lambda: nn.ReLU(inplace=True))): """ 3x3 version of the depthwise asymmetric separable convolution block. Parameters: ---------- channels : int Number of input/output channels. stride : int, default 1 Strides of the convolution. padding : int, default 1 Padding value for convolution layer. dilation : int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. activation : function or str or None, default nn.ReLU(inplace=True) Activation function or name of activation function. """ return DwaConvBlock( channels=channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps, activation=activation) class DABBlock(nn.Module): """ DABNet specific base block. Parameters: ---------- channels : int Number of input/output channels. dilation : int Dilation value for a dilated branch in the unit. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, channels, dilation, bn_eps): super(DABBlock, self).__init__() mid_channels = channels // 2 self.norm_activ1 = NormActivation( in_channels=channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(channels))) self.conv1 = conv3x3_block( in_channels=channels, out_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid_channels))) self.branches = Concurrent(stack=True) self.branches.add_module("branches1", dwa_conv3x3_block( channels=mid_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid_channels)))) self.branches.add_module("branches2", dwa_conv3x3_block( channels=mid_channels, padding=dilation, dilation=dilation, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid_channels)))) self.norm_activ2 = NormActivation( in_channels=mid_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid_channels))) self.conv2 = conv1x1( in_channels=mid_channels, out_channels=channels) def forward(self, x): identity = x x = self.norm_activ1(x) x = self.conv1(x) x = self.branches(x) x = x.sum(dim=1) x = self.norm_activ2(x) x = self.conv2(x) x = x + identity return x class DownBlock(nn.Module): """ DABNet specific downsample block for the main branch. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(DownBlock, self).__init__() self.expand = (in_channels < out_channels) mid_channels = out_channels - in_channels if self.expand else out_channels self.conv = conv3x3( in_channels=in_channels, out_channels=mid_channels, stride=2) if self.expand: self.pool = nn.MaxPool2d( kernel_size=2, stride=2) self.norm_activ = NormActivation( in_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) def forward(self, x): y = self.conv(x) if self.expand: z = self.pool(x) y = torch.cat((y, z), dim=1) y = self.norm_activ(y) return y class DABUnit(nn.Module): """ DABNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilations : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, dilations, bn_eps): super(DABUnit, self).__init__() mid_channels = out_channels // 2 self.down = DownBlock( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps) self.blocks = nn.Sequential() for i, dilation in enumerate(dilations): self.blocks.add_module("block{}".format(i + 1), DABBlock( channels=mid_channels, dilation=dilation, bn_eps=bn_eps)) def forward(self, x): x = self.down(x) y = self.blocks(x) x = torch.cat((y, x), dim=1) return x class DABStage(nn.Module): """ DABNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. dilations : list of int Dilations for blocks. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, dilations, bn_eps): super(DABStage, self).__init__() self.use_unit = (len(dilations) > 0) self.x_down = nn.AvgPool2d( kernel_size=3, stride=2, padding=1) if self.use_unit: self.unit = DABUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), dilations=dilations, bn_eps=bn_eps) self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(y_out_channels))) def forward(self, y, x): x = self.x_down(x) if self.use_unit: y = self.unit(y) y = torch.cat((y, x), dim=1) y = self.norm_activ(y) return y, x class DABInitBlock(nn.Module): """ DABNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(DABInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class DABNet(nn.Module): """ DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. dilations : list of list of int Dilations for blocks. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, channels, init_block_channels, dilations, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(DABNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) self.features.add_module("init_block", DABInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) y_in_channels = init_block_channels for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)): self.features.add_module("stage{}".format(i + 1), DABStage( x_channels=in_channels, y_in_channels=y_in_channels, y_out_channels=y_out_channels, dilations=dilations_i, bn_eps=bn_eps)) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=num_classes) self.up = InterpolationBlock( scale_factor=8, align_corners=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] y = self.features(x, x) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_dabnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create DABNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 32 channels = [35, 131, 259] dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]] bn_eps = 1e-3 net = DABNet( channels=channels, init_block_channels=init_block_channels, dilations=dilations, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def dabnet_cityscapes(num_classes=19, **kwargs): """ DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_dabnet(num_classes=num_classes, model_name="dabnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ dabnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != dabnet_cityscapes or weight_count == 756643) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
16,345
28.505415
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/cgnet.py
""" CGNet for image segmentation, implemented in PyTorch. Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. """ __all__ = ['CGNet', 'cgnet_cityscapes'] import os import torch import torch.nn as nn from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\ DualPathSequential, InterpolationBlock class CGBlock(nn.Module): """ CGNet block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. dilation : int Dilation value. se_reduction : int SE-block reduction value. down : bool Whether to downsample. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, dilation, se_reduction, down, bn_eps): super(CGBlock, self).__init__() self.down = down if self.down: mid1_channels = out_channels mid2_channels = 2 * out_channels else: mid1_channels = out_channels // 2 mid2_channels = out_channels if self.down: self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) else: self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid1_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid1_channels))) self.branches = Concurrent() self.branches.add_module("branches1", depthwise_conv3x3(channels=mid1_channels)) self.branches.add_module("branches2", depthwise_conv3x3( channels=mid1_channels, padding=dilation, dilation=dilation)) self.norm_activ = NormActivation( in_channels=mid2_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(mid2_channels))) if self.down: self.conv2 = conv1x1( in_channels=mid2_channels, out_channels=out_channels) self.se = SEBlock( channels=out_channels, reduction=se_reduction, use_conv=False) def forward(self, x): if not self.down: identity = x x = self.conv1(x) x = self.branches(x) x = self.norm_activ(x) if self.down: x = self.conv2(x) x = self.se(x) if not self.down: x += identity return x class CGUnit(nn.Module): """ CGNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. layers : int Number of layers. dilation : int Dilation value. se_reduction : int SE-block reduction value. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, layers, dilation, se_reduction, bn_eps): super(CGUnit, self).__init__() mid_channels = out_channels // 2 self.down = CGBlock( in_channels=in_channels, out_channels=mid_channels, dilation=dilation, se_reduction=se_reduction, down=True, bn_eps=bn_eps) self.blocks = nn.Sequential() for i in range(layers - 1): self.blocks.add_module("block{}".format(i + 1), CGBlock( in_channels=mid_channels, out_channels=mid_channels, dilation=dilation, se_reduction=se_reduction, down=False, bn_eps=bn_eps)) def forward(self, x): x = self.down(x) y = self.blocks(x) x = torch.cat((y, x), dim=1) # NB: This differs from the original implementation. return x class CGStage(nn.Module): """ CGNet stage. Parameters: ---------- x_channels : int Number of input/output channels for x. y_in_channels : int Number of input channels for y. y_out_channels : int Number of output channels for y. layers : int Number of layers in the unit. dilation : int Dilation for blocks. se_reduction : int SE-block reduction value for blocks. bn_eps : float Small float added to variance in Batch norm. 
""" def __init__(self, x_channels, y_in_channels, y_out_channels, layers, dilation, se_reduction, bn_eps): super(CGStage, self).__init__() self.use_x = (x_channels > 0) self.use_unit = (layers > 0) if self.use_x: self.x_down = nn.AvgPool2d( kernel_size=3, stride=2, padding=1) if self.use_unit: self.unit = CGUnit( in_channels=y_in_channels, out_channels=(y_out_channels - x_channels), layers=layers, dilation=dilation, se_reduction=se_reduction, bn_eps=bn_eps) self.norm_activ = NormActivation( in_channels=y_out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(y_out_channels))) def forward(self, y, x=None): if self.use_unit: y = self.unit(y) if self.use_x: x = self.x_down(x) y = torch.cat((y, x), dim=1) y = self.norm_activ(y) return y, x class CGInitBlock(nn.Module): """ CGNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(CGInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) self.conv3 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bn_eps=bn_eps, activation=(lambda: nn.PReLU(out_channels))) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class CGNet(nn.Module): """ CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- layers : list of int Number of layers for each unit. channels : list of int Number of output channels for each unit (for y-branch). init_block_channels : int Number of output channels for the initial unit. dilations : list of int Dilations for each unit. se_reductions : list of int SE-block reduction value for each unit. cut_x : list of int Whether to concatenate with x-branch for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. aux : bool, default False Whether to output an auxiliary result. fixed_size : bool, default False Whether to expect fixed spatial size of input image. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (1024, 2048) Spatial size of the expected input image. num_classes : int, default 19 Number of segmentation classes. 
""" def __init__(self, layers, channels, init_block_channels, dilations, se_reductions, cut_x, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048), num_classes=19): super(CGNet, self).__init__() assert (aux is not None) assert (fixed_size is not None) assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0)) self.in_size = in_size self.num_classes = num_classes self.fixed_size = fixed_size self.features = DualPathSequential( return_two=False, first_ordinals=1, last_ordinals=0) self.features.add_module("init_block", CGInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) y_in_channels = init_block_channels for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)): self.features.add_module("stage{}".format(i + 1), CGStage( x_channels=in_channels if cut_x[i] == 1 else 0, y_in_channels=y_in_channels, y_out_channels=y_out_channels, layers=layers_i, dilation=dilations[i], se_reduction=se_reductions[i], bn_eps=bn_eps)) y_in_channels = y_out_channels self.classifier = conv1x1( in_channels=y_in_channels, out_channels=num_classes) self.up = InterpolationBlock( scale_factor=8, align_corners=False) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): nn.init.kaiming_uniform_(module.weight) if module.bias is not None: nn.init.constant_(module.bias, 0) def forward(self, x): in_size = self.in_size if self.fixed_size else x.shape[2:] y = self.features(x, x) y = self.classifier(y) y = self.up(y, size=in_size) return y def get_cgnet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create CGNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ init_block_channels = 32 layers = [0, 3, 21] channels = [35, 131, 256] dilations = [0, 2, 4] se_reductions = [0, 8, 16] cut_x = [1, 1, 0] bn_eps = 1e-3 net = CGNet( layers=layers, channels=channels, init_block_channels=init_block_channels, dilations=dilations, se_reductions=se_reductions, cut_x=cut_x, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def cgnet_cityscapes(num_classes=19, **kwargs): """ CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201. Parameters: ---------- num_classes : int, default 19 Number of segmentation classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_cgnet(num_classes=num_classes, model_name="cgnet_cityscapes", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): pretrained = False fixed_size = True in_size = (1024, 2048) classes = 19 models = [ cgnet_cityscapes, ] for model in models: net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != cgnet_cityscapes or weight_count == 496306) batch = 4 x = torch.randn(batch, 3, in_size[0], in_size[1]) y = net(x) # y.sum().backward() assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1])) if __name__ == "__main__": _test()
13,575
28.577342
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/wrn1bit_cifar.py
""" WRN-1bit for CIFAR/SVHN, implemented in PyTorch. Original paper: 'Training wide residual networks for deployment using a single bit for each weight,' https://arxiv.org/abs/1802.08530. """ __all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn', 'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn'] import os import math import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F class Binarize(torch.autograd.Function): """ Fake sign op for 1-bit weights. """ @staticmethod def forward(ctx, x): return math.sqrt(2.0 / (x.shape[1] * x.shape[2] * x.shape[3])) * x.sign() @staticmethod def backward(ctx, dy): return dy class Conv2d1bit(nn.Conv2d): """ Standard convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding=1, dilation=1, groups=1, bias=False, binarized=False): super(Conv2d1bit, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.binarized = binarized def forward(self, input): weight = Binarize.apply(self.weight) if self.binarized else self.weight bias = Binarize.apply(self.bias) if self.bias is not None and self.binarized else self.bias return F.conv2d( input=input, weight=weight, bias=bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) def conv1x1_1bit(in_channels, out_channels, stride=1, groups=1, bias=False, binarized=False): """ Convolution 1x1 layer with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. """ return Conv2d1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, groups=groups, bias=bias, binarized=binarized) def conv3x3_1bit(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False, binarized=False): """ Convolution 3x3 layer with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. binarized : bool, default False Whether to use binarization. 
""" return Conv2d1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, binarized=binarized) class ConvBlock1bit(nn.Module): """ Standard convolution block with Batch normalization and ReLU activation, and binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, bn_affine=True, activate=True, binarized=False): super(ConvBlock1bit, self).__init__() self.activate = activate self.conv = Conv2d1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, binarized=binarized) self.bn = nn.BatchNorm2d( num_features=out_channels, affine=bn_affine) if self.activate: self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) if self.activate: x = self.activ(x) return x def conv1x1_block_1bit(in_channels, out_channels, stride=1, padding=0, groups=1, bias=False, bn_affine=True, activate=True, binarized=False): """ 1x1 version of the standard convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 0 Padding value for convolution layer. groups : int, default 1 Number of groups. bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ return ConvBlock1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding, groups=groups, bias=bias, bn_affine=bn_affine, activate=activate, binarized=binarized) class PreConvBlock1bit(nn.Module): """ Convolution block with Batch normalization and ReLU pre-activation, and binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bias : bool, default False Whether the layer uses a bias vector. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. return_preact : bool, default False Whether return pre-activation. It's used by PreResNet. activate : bool, default True Whether activate the convolution block. 
binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, bn_affine=True, return_preact=False, activate=True, binarized=False): super(PreConvBlock1bit, self).__init__() self.return_preact = return_preact self.activate = activate self.bn = nn.BatchNorm2d( num_features=in_channels, affine=bn_affine) if self.activate: self.activ = nn.ReLU(inplace=True) self.conv = Conv2d1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, binarized=binarized) def forward(self, x): x = self.bn(x) if self.activate: x = self.activ(x) if self.return_preact: x_pre_activ = x x = self.conv(x) if self.return_preact: return x, x_pre_activ else: return x def pre_conv3x3_block_1bit(in_channels, out_channels, stride=1, padding=1, dilation=1, bn_affine=True, return_preact=False, activate=True, binarized=False): """ 3x3 version of the pre-activated convolution block with binarization. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int, default 1 Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for convolution layer. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. return_preact : bool, default False Whether return pre-activation. activate : bool, default True Whether activate the convolution block. binarized : bool, default False Whether to use binarization. """ return PreConvBlock1bit( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bn_affine=bn_affine, return_preact=return_preact, activate=activate, binarized=binarized) class PreResBlock1bit(nn.Module): """ Simple PreResNet block for residual path in ResNet unit (with binarization). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. binarized : bool, default False Whether to use binarization. """ def __init__(self, in_channels, out_channels, stride, binarized=False): super(PreResBlock1bit, self).__init__() self.conv1 = pre_conv3x3_block_1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_affine=False, return_preact=False, binarized=binarized) self.conv2 = pre_conv3x3_block_1bit( in_channels=out_channels, out_channels=out_channels, bn_affine=False, binarized=binarized) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class PreResUnit1bit(nn.Module): """ PreResNet unit with residual connection (with binarization). Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. binarized : bool, default False Whether to use binarization. 
""" def __init__(self, in_channels, out_channels, stride, binarized=False): super(PreResUnit1bit, self).__init__() self.resize_identity = (stride != 1) self.body = PreResBlock1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, binarized=binarized) if self.resize_identity: self.identity_pool = nn.AvgPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): identity = x x = self.body(x) if self.resize_identity: identity = self.identity_pool(identity) identity = torch.cat((identity, torch.zeros_like(identity)), dim=1) x = x + identity return x class PreResActivation(nn.Module): """ PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block. Parameters: ---------- in_channels : int Number of input channels. bn_affine : bool, default True Whether the BatchNorm layer learns affine parameters. """ def __init__(self, in_channels, bn_affine=True): super(PreResActivation, self).__init__() self.bn = nn.BatchNorm2d( num_features=in_channels, affine=bn_affine) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class CIFARWRN1bit(nn.Module): """ WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. binarized : bool, default True Whether to use binarization. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (32, 32) Spatial size of the expected input image. num_classes : int, default 10 Number of classification classes. """ def __init__(self, channels, init_block_channels, binarized=True, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARWRN1bit, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_1bit( in_channels=in_channels, out_channels=init_block_channels, binarized=binarized)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), PreResUnit1bit( in_channels=in_channels, out_channels=out_channels, stride=stride, binarized=binarized)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PreResActivation( in_channels=in_channels, bn_affine=False)) self.output = nn.Sequential() self.output.add_module("final_conv", conv1x1_block_1bit( in_channels=in_channels, out_channels=num_classes, activate=False, binarized=binarized)) self.output.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = self.output(x) x = x.view(x.size(0), -1) return x def get_wrn1bit_cifar(num_classes, blocks, width_factor, binarized=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create WRN-1bit model for CIFAR with specific parameters. Parameters: ---------- num_classes : int Number of classification classes. blocks : int Number of blocks. 
width_factor : int Wide scale factor for width of layers. binarized : bool, default True Whether to use binarization. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)] init_block_channels *= width_factor net = CIFARWRN1bit( channels=channels, init_block_channels=init_block_channels, binarized=binarized, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def wrn20_10_1bit_cifar10(num_classes=10, **kwargs): """ WRN-20-10-1bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_cifar10", **kwargs) def wrn20_10_1bit_cifar100(num_classes=100, **kwargs): """ WRN-20-10-1bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 100 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_cifar100", **kwargs) def wrn20_10_1bit_svhn(num_classes=10, **kwargs): """ WRN-20-10-1bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=True, model_name="wrn20_10_1bit_svhn", **kwargs) def wrn20_10_32bit_cifar10(num_classes=10, **kwargs): """ WRN-20-10-32bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_cifar10", **kwargs) def wrn20_10_32bit_cifar100(num_classes=100, **kwargs): """ WRN-20-10-32bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 100 Number of classification classes. 
pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_cifar100", **kwargs) def wrn20_10_32bit_svhn(num_classes=10, **kwargs): """ WRN-20-10-32bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146. Parameters: ---------- num_classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_wrn1bit_cifar(num_classes=num_classes, blocks=20, width_factor=10, binarized=False, model_name="wrn20_10_32bit_svhn", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ (wrn20_10_1bit_cifar10, 10), (wrn20_10_1bit_cifar100, 100), (wrn20_10_1bit_svhn, 10), (wrn20_10_32bit_cifar10, 10), (wrn20_10_32bit_cifar100, 100), (wrn20_10_32bit_svhn, 10), ] for model, num_classes in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != wrn20_10_1bit_cifar10 or weight_count == 26737140) assert (model != wrn20_10_1bit_cifar100 or weight_count == 26794920) assert (model != wrn20_10_1bit_svhn or weight_count == 26737140) assert (model != wrn20_10_32bit_cifar10 or weight_count == 26737140) assert (model != wrn20_10_32bit_cifar100 or weight_count == 26794920) assert (model != wrn20_10_32bit_svhn or weight_count == 26737140) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes)) if __name__ == "__main__": _test()
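A small demo of the straight-through `Binarize` op defined above (assuming this module's context): the forward pass replaces a weight tensor by its sign scaled by sqrt(2 / fan_in), while the backward pass returns the incoming gradient unchanged, which is what makes the 1-bit weights trainable.

import math
import torch

w = torch.randn(8, 4, 3, 3, requires_grad=True)  # (out, in, kH, kW) conv weight
wb = Binarize.apply(w)
scale = math.sqrt(2.0 / (4 * 3 * 3))             # sqrt(2 / fan_in)
assert torch.allclose(wb.abs(), torch.full_like(wb, scale))  # values are +-scale
wb.sum().backward()
assert torch.allclose(w.grad, torch.ones_like(w))  # straight-through (identity) gradient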
24,899
30.558935
115
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/condensenet.py
""" CondenseNet for ImageNet-1K, implemented in PyTorch. Original paper: 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. """ __all__ = ['CondenseNet', 'condensenet74_c4_g4', 'condensenet74_c8_g8'] import os import torch import torch.nn as nn import torch.nn.init as init from torch.autograd import Variable from .common import ChannelShuffle class CondenseSimpleConv(nn.Module): """ CondenseNet specific simple convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups): super(CondenseSimpleConv, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False) def forward(self, x): x = self.bn(x) x = self.activ(x) x = self.conv(x) return x def condense_simple_conv3x3(in_channels, out_channels, groups): """ 3x3 version of the CondenseNet specific simple convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. """ return CondenseSimpleConv( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, groups=groups) class CondenseComplexConv(nn.Module): """ CondenseNet specific complex convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. kernel_size : int or tuple/list of 2 int Convolution window size. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int Padding value for convolution layer. groups : int Number of groups. """ def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups): super(CondenseComplexConv, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False) self.c_shuffle = ChannelShuffle( channels=out_channels, groups=groups) self.register_buffer('index', torch.LongTensor(in_channels)) self.index.fill_(0) def forward(self, x): x = torch.index_select(x, dim=1, index=Variable(self.index)) x = self.bn(x) x = self.activ(x) x = self.conv(x) x = self.c_shuffle(x) return x def condense_complex_conv1x1(in_channels, out_channels, groups): """ 1x1 version of the CondenseNet specific complex convolution block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. """ return CondenseComplexConv( in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, groups=groups) class CondenseUnit(nn.Module): """ CondenseNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. groups : int Number of groups. 
""" def __init__(self, in_channels, out_channels, groups): super(CondenseUnit, self).__init__() bottleneck_size = 4 inc_channels = out_channels - in_channels mid_channels = inc_channels * bottleneck_size self.conv1 = condense_complex_conv1x1( in_channels=in_channels, out_channels=mid_channels, groups=groups) self.conv2 = condense_simple_conv3x3( in_channels=mid_channels, out_channels=inc_channels, groups=groups) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) x = torch.cat((identity, x), dim=1) return x class TransitionBlock(nn.Module): """ CondenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the first unit of each stage. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self): super(TransitionBlock, self).__init__() self.pool = nn.AvgPool2d( kernel_size=2, stride=2, padding=0) def forward(self, x): x = self.pool(x) return x class CondenseInitBlock(nn.Module): """ CondenseNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(CondenseInitBlock, self).__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=False) def forward(self, x): x = self.conv(x) return x class PostActivation(nn.Module): """ CondenseNet final block, which performs the same function of postactivation as in PreResNet. Parameters: ---------- in_channels : int Number of input channels. """ def __init__(self, in_channels): super(PostActivation, self).__init__() self.bn = nn.BatchNorm2d(num_features=in_channels) self.activ = nn.ReLU(inplace=True) def forward(self, x): x = self.bn(x) x = self.activ(x) return x class CondenseLinear(nn.Module): """ CondenseNet specific linear block. Parameters: ---------- in_features : int Number of input channels. out_features : int Number of output channels. drop_rate : float Fraction of input channels for drop. """ def __init__(self, in_features, out_features, drop_rate=0.5): super(CondenseLinear, self).__init__() drop_in_features = int(in_features * drop_rate) self.linear = nn.Linear( in_features=drop_in_features, out_features=out_features) self.register_buffer('index', torch.LongTensor(drop_in_features)) self.index.fill_(0) def forward(self, x): x = torch.index_select(x, dim=1, index=Variable(self.index)) x = self.linear(x) return x class CondenseNet(nn.Module): """ CondenseNet model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. groups : int Number of groups in convolution layers. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), num_classes=1000): super(CondenseNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", CondenseInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() if i != 0: stage.add_module("trans{}".format(i + 1), TransitionBlock()) for j, out_channels in enumerate(channels_per_stage): stage.add_module("unit{}".format(j + 1), CondenseUnit( in_channels=in_channels, out_channels=out_channels, groups=groups)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("post_activ", PostActivation(in_channels=in_channels)) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = CondenseLinear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) elif isinstance(module, nn.BatchNorm2d): init.constant_(module.weight, 1) init.constant_(module.bias, 0) elif isinstance(module, nn.Linear): init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_condensenet(num_layers, groups=4, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create CondenseNet (converted) model with specific parameters. Parameters: ---------- num_layers : int Number of layers. groups : int Number of groups in convolution layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if num_layers == 74: init_block_channels = 16 layers = [4, 6, 8, 10, 8] growth_rates = [8, 16, 32, 64, 128] else: raise ValueError("Unsupported CondenseNet version with number of layers {}".format(num_layers)) from functools import reduce channels = reduce(lambda xi, yi: xi + [reduce(lambda xj, yj: xj + [xj[-1] + yj], [yi[1]] * yi[0], [xi[-1][-1]])[1:]], zip(layers, growth_rates), [[init_block_channels]])[1:] net = CondenseNet( channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def condensenet74_c4_g4(**kwargs): """ CondenseNet-74 (C=G=4) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_condensenet(num_layers=74, groups=4, model_name="condensenet74_c4_g4", **kwargs) def condensenet74_c8_g8(**kwargs): """ CondenseNet-74 (C=G=8) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,' https://arxiv.org/abs/1711.09224. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_condensenet(num_layers=74, groups=8, model_name="condensenet74_c8_g8", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ condensenet74_c4_g4, condensenet74_c8_g8, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != condensenet74_c4_g4 or weight_count == 4773944) assert (model != condensenet74_c8_g8 or weight_count == 2935416) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
14,732
28.059172
120
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/fbnet.py
""" FBNet for ImageNet-1K, implemented in PyTorch. Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. """ __all__ = ['FBNet', 'fbnet_cb'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block class FBNetUnit(nn.Module): """ FBNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the second convolution layer. bn_eps : float Small float added to variance in Batch norm. use_kernel3 : bool Whether to use 3x3 (instead of 5x5) kernel. exp_factor : int Expansion factor for each unit. activation : str, default 'relu' Activation function or name of activation function. """ def __init__(self, in_channels, out_channels, stride, bn_eps, use_kernel3, exp_factor, activation="relu"): super(FBNetUnit, self).__init__() assert (exp_factor >= 1) self.residual = (in_channels == out_channels) and (stride == 1) self.use_exp_conv = True mid_channels = exp_factor * in_channels if self.use_exp_conv: self.exp_conv = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, bn_eps=bn_eps, activation=activation) if use_kernel3: self.conv1 = dwconv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bn_eps=bn_eps, activation=activation) else: self.conv1 = dwconv5x5_block( in_channels=mid_channels, out_channels=mid_channels, stride=stride, bn_eps=bn_eps, activation=activation) self.conv2 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None) def forward(self, x): if self.residual: identity = x if self.use_exp_conv: x = self.exp_conv(x) x = self.conv1(x) x = self.conv2(x) if self.residual: x = x + identity return x class FBNetInitBlock(nn.Module): """ FBNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. bn_eps : float Small float added to variance in Batch norm. """ def __init__(self, in_channels, out_channels, bn_eps): super(FBNetInitBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps) self.conv2 = FBNetUnit( in_channels=out_channels, out_channels=out_channels, stride=1, bn_eps=bn_eps, use_kernel3=True, exp_factor=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class FBNet(nn.Module): """ FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. final_block_channels : int Number of output channels for the final block of the feature extractor. kernels3 : list of list of int/bool Using 3x3 (instead of 5x5) kernel for each unit. exp_factors : list of list of int Expansion factor for each unit. bn_eps : float, default 1e-5 Small float added to variance in Batch norm. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. 
""" def __init__(self, channels, init_block_channels, final_block_channels, kernels3, exp_factors, bn_eps=1e-5, in_channels=3, in_size=(224, 224), num_classes=1000): super(FBNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", FBNetInitBlock( in_channels=in_channels, out_channels=init_block_channels, bn_eps=bn_eps)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) else 1 use_kernel3 = kernels3[i][j] == 1 exp_factor = exp_factors[i][j] stage.add_module("unit{}".format(j + 1), FBNetUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bn_eps=bn_eps, use_kernel3=use_kernel3, exp_factor=exp_factor)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_block", conv1x1_block( in_channels=in_channels, out_channels=final_block_channels, bn_eps=bn_eps)) in_channels = final_block_channels self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_fbnet(version, bn_eps=1e-5, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create FBNet model with specific parameters. Parameters: ---------- version : str Version of MobileNetV3 ('a', 'b' or 'c'). bn_eps : float, default 1e-5 Small float added to variance in Batch norm. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if version == "c": init_block_channels = 16 final_block_channels = 1984 channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]] kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]] exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]] else: raise ValueError("Unsupported FBNet version {}".format(version)) net = FBNet( channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, kernels3=kernels3, exp_factors=exp_factors, bn_eps=bn_eps, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def fbnet_cb(**kwargs): """ FBNet-Cb model (bn_eps=1e-3) from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,' https://arxiv.org/abs/1812.03443. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_fbnet(version="c", bn_eps=1e-3, model_name="fbnet_cb", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ fbnet_cb, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != fbnet_cb or weight_count == 5572200) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
9,969
30.352201
116
py
imgclsmob
imgclsmob-master/pytorch/pytorchcv/models/visemenet.py
""" VisemeNet for speech-driven facial animation, implemented in PyTorch. Original paper: 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. """ __all__ = ['VisemeNet', 'visemenet20'] import os import torch import torch.nn as nn from .common import DenseBlock class VisemeDenseBranch(nn.Module): """ VisemeNet dense branch. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of middle/output channels. """ def __init__(self, in_channels, out_channels_list): super(VisemeDenseBranch, self).__init__() self.branch = nn.Sequential() for i, out_channels in enumerate(out_channels_list[:-1]): self.branch.add_module("block{}".format(i + 1), DenseBlock( in_features=in_channels, out_features=out_channels, bias=True, use_bn=True)) in_channels = out_channels self.final_fc = nn.Linear( in_features=in_channels, out_features=out_channels_list[-1]) def forward(self, x): x = self.branch(x) y = self.final_fc(x) return y, x class VisemeRnnBranch(nn.Module): """ VisemeNet RNN branch. Parameters: ---------- in_channels : int Number of input channels. out_channels_list : list of int Number of middle/output channels. rnn_num_layers : int Number of RNN layers. dropout_rate : float Dropout rate. """ def __init__(self, in_channels, out_channels_list, rnn_num_layers, dropout_rate): super(VisemeRnnBranch, self).__init__() self.rnn = nn.LSTM( input_size=in_channels, hidden_size=out_channels_list[0], num_layers=rnn_num_layers, dropout=dropout_rate) self.fc_branch = VisemeDenseBranch( in_channels=out_channels_list[0], out_channels_list=out_channels_list[1:]) def forward(self, x): x, _ = self.rnn(x) x = x[:, -1, :] y, _ = self.fc_branch(x) return y class VisemeNet(nn.Module): """ VisemeNet model from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. Parameters: ---------- audio_features : int, default 195 Number of audio features (characters/sounds). audio_window_size : int, default 8 Size of audio window (for time related audio features). stage2_window_size : int, default 64 Size of window for stage #2. num_face_ids : int, default 76 Number of face IDs. num_landmarks : int, default 76 Number of landmarks. num_phonemes : int, default 21 Number of phonemes. num_visemes : int, default 20 Number of visemes. dropout_rate : float, default 0.5 Dropout rate for RNNs. 
""" def __init__(self, audio_features=195, audio_window_size=8, stage2_window_size=64, num_face_ids=76, num_landmarks=76, num_phonemes=21, num_visemes=20, dropout_rate=0.5): super(VisemeNet, self).__init__() stage1_rnn_hidden_size = 256 stage1_fc_mid_channels = 256 stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \ stage2_window_size // audio_window_size self.audio_window_size = audio_window_size self.stage2_window_size = stage2_window_size self.stage1_rnn = nn.LSTM( input_size=audio_features, hidden_size=stage1_rnn_hidden_size, num_layers=3, dropout=dropout_rate) self.lm_branch = VisemeDenseBranch( in_channels=(stage1_rnn_hidden_size + num_face_ids), out_channels_list=[stage1_fc_mid_channels, num_landmarks]) self.ph_branch = VisemeDenseBranch( in_channels=(stage1_rnn_hidden_size + num_face_ids), out_channels_list=[stage1_fc_mid_channels, num_phonemes]) self.cls_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[256, 200, num_visemes], rnn_num_layers=1, dropout_rate=dropout_rate) self.reg_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[256, 200, 100, num_visemes], rnn_num_layers=3, dropout_rate=dropout_rate) self.jali_branch = VisemeRnnBranch( in_channels=stage2_rnn_in_features, out_channels_list=[128, 200, 2], rnn_num_layers=3, dropout_rate=dropout_rate) def forward(self, x, pid): y, _ = self.stage1_rnn(x) y = y[:, -1, :] y = torch.cat((y, pid), dim=1) lm, _ = self.lm_branch(y) lm += pid ph, ph1 = self.ph_branch(y) z = torch.cat((lm, ph1), dim=1) z2 = torch.cat((z, x[:, self.audio_window_size // 2, :]), dim=1) n_net2_input = z2.shape[1] z2 = torch.cat((torch.zeros((self.stage2_window_size // 2, n_net2_input)), z2), dim=0) z = torch.stack( [z2[i:i + self.stage2_window_size].reshape( (self.audio_window_size, n_net2_input * self.stage2_window_size // self.audio_window_size)) for i in range(z2.shape[0] - self.stage2_window_size)], dim=0) cls = self.cls_branch(z) reg = self.reg_branch(z) jali = self.jali_branch(z) return cls, reg, jali def get_visemenet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create VisemeNet model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ net = VisemeNet( **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def visemenet20(**kwargs): """ VisemeNet model for 20 visemes (without co-articulation rules) from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_visemenet(model_name="visemenet20", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ visemenet20, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != visemenet20 or weight_count == 14574303) batch = 34 audio_window_size = 8 audio_features = 195 num_face_ids = 76 num_visemes = 20 x = torch.randn(batch, audio_window_size, audio_features) pid = torch.full(size=(batch, num_face_ids), fill_value=3) y1, y2, y3 = net(x, pid) assert (y1.shape[0] == y2.shape[0] == y3.shape[0]) assert (y1.shape[1] == y2.shape[1] == num_visemes) assert (y3.shape[1] == 2) if __name__ == "__main__": _test()
8,396
30.215613
119
py