repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DialogID | DialogID-main/src/auto_text_classifier/atc/utils/adt_utils.py | '''
对抗训练
参考实现
https://fyubang.com/2019/10/15/adversarial-train/
'''
import torch
import numpy as np
from torch.autograd import Variable
# from loguru import logger
class FGM():
    """Fast Gradient Method (FGM) adversarial-training helper.

    Reference: https://fyubang.com/2019/10/15/adversarial-train/
    Usage: after the normal backward pass call ``attack()`` to perturb the
    embedding weights along the gradient direction, run a second
    forward/backward, then call ``restore()`` to put the weights back.
    """

    def __init__(self, model):
        self.model = model
        # name -> clean embedding weights, saved by attack()
        self.backup = {}

    def attack(self, epsilon=1., emb_name='emb.'):
        '''
        Apply an adversarial perturbation to the embedding parameters.

        Parameters:
            epsilon -- scale of the perturbation
            emb_name -- substring identifying the embedding parameter names
                        in your model (replace with your own model's name)
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                if param.grad is None:
                    continue
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                # Guard against zero AND non-finite norms: epsilon * grad / nan
                # would silently poison the embedding weights.
                if norm != 0 and torch.isfinite(norm):
                    r_at = epsilon * param.grad / norm
                    param.data.add_(r_at)

    def restore(self, emb_name='emb.'):
        '''
        Restore the embedding parameters saved by attack().

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                if param.grad is None:
                    continue
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}
class PGD():
    """Projected Gradient Descent (PGD) adversarial-training helper.

    Reference: https://fyubang.com/2019/10/15/adversarial-train/
    Multi-step variant of FGM: call ``attack()`` K times per batch
    (``is_first_attack=True`` on the first call), keeping the accumulated
    perturbation inside an epsilon-ball around the clean embedding weights,
    then call ``restore()``. ``backup_grad``/``restore_grad`` preserve the
    real gradients across the intermediate attack backwards.
    """

    def __init__(self, model):
        self.model = model
        # name -> clean embedding weights, saved on the first attack step
        self.emb_backup = {}
        # name -> gradients saved by backup_grad()
        self.grad_backup = {}

    def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
        '''
        Take one ascent step of size ``alpha`` on the embedding parameters
        and project the total perturbation back into the ``epsilon``-ball.

        Parameters:
            epsilon -- radius of the projection ball
            alpha -- step size of one attack step
            emb_name -- substring identifying the embedding parameter names
                        in your model (replace with your own model's name)
            is_first_attack -- True on the first of the K steps, to snapshot
                               the clean weights
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                # Skip parameters that received no gradient this step; the
                # original crashed here on grad=None (FGM in this module
                # already guards against it).
                if param.grad is None:
                    continue
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                # Non-finite norms would poison the weights with nan/inf.
                if norm != 0 and torch.isfinite(norm):
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='embedding'):
        '''
        Restore the embedding parameters saved on the first attack step.

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                # Nothing backed up means the parameter was never perturbed
                # (e.g. its grad was None on every attack step) -- skip it.
                if name not in self.emb_backup:
                    continue
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        # Clip the total perturbation so it stays inside the epsilon-ball
        # around the clean (backed-up) weights.
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        # Snapshot gradients before the attack backwards clobber them.
        # Parameters without a gradient are skipped (the original raised
        # AttributeError on grad=None here).
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        # Put back only what backup_grad() actually saved.
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]
class FreeAT():
    """'Free' adversarial-training helper.

    NOTE(review): this class is byte-identical to ``PGD`` in this module --
    a copy-paste placeholder rather than the FreeAT algorithm proper (which
    reuses the perturbation across minibatches). Confirm intent before
    relying on it as FreeAT. Kept behavior-compatible; only the grad=None
    crashes are fixed, as in ``PGD``.
    """

    def __init__(self, model):
        self.model = model
        # name -> clean embedding weights, saved on the first attack step
        self.emb_backup = {}
        # name -> gradients saved by backup_grad()
        self.grad_backup = {}

    def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
        '''
        One ascent step on the embedding parameters, projected into the
        ``epsilon``-ball around the clean weights.

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                # Skip parameters without a gradient (original crashed here).
                if param.grad is None:
                    continue
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and torch.isfinite(norm):
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='embedding'):
        '''
        Restore the embedding parameters saved on the first attack step.

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                if name not in self.emb_backup:
                    continue
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        # Clip the total perturbation into the epsilon-ball.
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        # Parameters without a gradient are skipped (original raised here).
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]
class FreeLB():
    """FreeLB adversarial-training helper.

    NOTE(review): this class is byte-identical to ``PGD`` in this module --
    a copy-paste placeholder rather than FreeLB proper (which averages
    gradients over the K ascent steps). Confirm intent before relying on it
    as FreeLB. Kept behavior-compatible; only the grad=None crashes are
    fixed, as in ``PGD``.
    """

    def __init__(self, model):
        self.model = model
        # name -> clean embedding weights, saved on the first attack step
        self.emb_backup = {}
        # name -> gradients saved by backup_grad()
        self.grad_backup = {}

    def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
        '''
        One ascent step on the embedding parameters, projected into the
        ``epsilon``-ball around the clean weights.

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                # Skip parameters without a gradient (original crashed here).
                if param.grad is None:
                    continue
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and torch.isfinite(norm):
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='embedding'):
        '''
        Restore the embedding parameters saved on the first attack step.

        Parameters:
            emb_name -- substring identifying the embedding parameter names
        '''
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                if name not in self.emb_backup:
                    continue
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        # Clip the total perturbation into the epsilon-ball.
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        # Parameters without a gradient are skipped (original raised here).
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]
| 7,143 | 31.770642 | 89 | py |
DialogID | DialogID-main/src/auto_text_classifier/atc/utils/data_utils.py | import os
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from transformers.data.processors.utils import InputFeatures
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
def init_dir(dir_path):
    """
    Ensure ``dir_path`` exists, creating intermediate directories as needed.

    Parameters:
        dir_path: directory path to create (no-op if it already exists)
    Returns:
        None
    """
    os.makedirs(dir_path, exist_ok=True)
def train_dev_test_split(df, train_size=0.8):
    """
    Deterministically shuffle ``df`` and split it into train/dev/test.

    Parameters:
        df: DataFrame to split.
        train_size: absolute row count (int >= 1) or fraction in (0, 1).
    Returns:
        df_train, df_dev, df_test -- dev and test share the remainder
        equally (integer halves; test gets the extra row when odd).
    """
    shuffled = df.sample(frac=1, random_state=0).copy()
    total = shuffled.shape[0]
    if train_size < 1:
        train_size = int(train_size * total)
    dev_size = (total - train_size) // 2
    dev_end = train_size + dev_size
    df_train = shuffled[:train_size]
    df_dev = shuffled[train_size:dev_end]
    df_test = shuffled[dev_end:]
    return df_train, df_dev, df_test
def split_3_save_data(save_dir, df, train_size=0.8):
    """
    Split ``df`` into train/dev/test and persist each part under ``save_dir``
    as train.csv / dev.csv / test.csv.

    Parameters:
        save_dir: directory to save into (created if missing)
        df: DataFrame to split.
        train_size: absolute row count (int >= 1) or fraction in (0, 1).
    Returns:
        df_train, df_dev, df_test
    """
    splits = train_dev_test_split(df, train_size)
    init_dir(save_dir)
    for frame, fname in zip(splits, ("train.csv", "dev.csv", "test.csv")):
        frame.to_csv(os.path.join(save_dir, fname), index=False)
    return splits
def load_df(path):
    """
    Load a DataFrame from a csv/xlsx/pickle path, or pass a DataFrame through.

    Readers are probed best-effort in order (csv, excel, pickle); the first
    one that succeeds wins.

    Parameters:
        path: csv/xlsx/pickle file path, or an existing DataFrame object
    Returns:
        df: the loaded DataFrame, or None when ``path`` is a string that no
            reader could parse (callers rely on this best-effort contract)
    """
    df = None
    if isinstance(path, str):
        for pd_read_fun in [pd.read_csv, pd.read_excel, pd.read_pickle]:
            try:
                df = pd_read_fun(path)
                break
            except Exception:
                # Try the next reader on any parse/IO error, but no longer
                # swallow KeyboardInterrupt/SystemExit (original bare except).
                continue
    else:
        df = path
    return df
def load_df_1(path):
    """
    Load a DataFrame from a csv/xlsx/pickle path, or pass a DataFrame through,
    without the extra post-processing constraints of ``load_df``.

    Parameters:
        path: csv/xlsx/pickle file path, or an existing DataFrame object
    Returns:
        df: the loaded DataFrame, or None when ``path`` is a string that no
            reader could parse
    """
    # BUG FIX: df was never initialized, so an unreadable/missing string path
    # fell through every reader and raised UnboundLocalError on return.
    df = None
    if isinstance(path, str):
        for pd_read_fun in [pd.read_csv, pd.read_excel, pd.read_pickle]:
            try:
                df = pd_read_fun(path)
                break
            except Exception:
                # Best-effort probing; narrowed from the original bare except.
                continue
    else:
        df = path
    return df
def get_one_data_report(path, name=""):
    """
    Build a summary dict for one dataset: per-label counts, total size,
    dataset name, and per-label ratio entries.

    Parameters:
        path: dataset path or DataFrame (anything ``load_df`` accepts)
        name: display name of the dataset
    Returns:
        report: dict with per-label counts, '总量' (total rows), '数据集'
                (dataset name), and '<label>占比' ratios rounded to 3 decimals
    """
    df = load_df(path)
    report = df['label'].value_counts().to_dict()
    report['总量'] = df.shape[0]
    report['数据集'] = name
    ratios = df['label'].value_counts(normalize=True).to_dict()
    report.update({"{}占比".format(label): round(share, 3)
                   for label, share in ratios.items()})
    return report
def get_data_report(train_path, dev_path, test_path):
    """
    Build a combined report DataFrame over the train/dev/test datasets,
    with the dataset-name and total columns first.

    Parameters:
        train_path: train dataset path or DataFrame
        dev_path: dev dataset path or DataFrame
        test_path: test dataset path or DataFrame
    Returns:
        df_data_report: one row per dataset (train, dev, test)
    """
    rows = [get_one_data_report(path, split_name)
            for path, split_name in ((train_path, "train"),
                                     (dev_path, "dev"),
                                     (test_path, "test"))]
    df_data_report = pd.DataFrame(rows)
    head_cols = ["数据集", "总量"]
    tail_cols = [col for col in df_data_report.columns.tolist()
                 if col not in head_cols]
    return df_data_report[head_cols + tail_cols]
class DataGet():
    '''
    K-fold data access: for a fold index ``kf_i`` returns df_train, df_dev,
    df_test slices of the wrapped DataFrame.
    '''

    def __init__(self, df, n_splits=5, random_state=5):
        self.df = df
        self.n_splits = n_splits
        self.random_state = random_state
        # Stable positional id column used to address rows across folds.
        self.df['index_cv'] = range(len(self.df))
        self.index_col = 'index_cv'
        unique_ids = self.df['index_cv'].unique()
        self.all_split_info = self.get_split_info(unique_ids, n_splits)

    def get_split_id(self, all_split_info, kf_i):
        # Same contract as get_data_index, but reads the mapping passed in.
        fold = all_split_info[kf_i]
        return fold['train_ids'], fold['dev_ids'], fold['test_ids']

    def get_split_info(self, ids, n_splits=5):
        """Pre-compute train/dev/test id lists for every fold."""
        folds = KFold(n_splits=n_splits, shuffle=True,
                      random_state=self.random_state)
        split_info = {}
        for kf_i, (train_ids, test_ids) in enumerate(folds.split(ids)):
            # Carve a 10% dev set out of each fold's training ids.
            train_ids, dev_ids = train_test_split(
                train_ids, test_size=0.1, random_state=self.random_state)
            split_info[kf_i] = {
                "train_ids": list(train_ids),
                "dev_ids": list(dev_ids),
                "test_ids": list(test_ids),
            }
        return split_info

    def get_data_index(self, kf_i):
        fold = self.all_split_info[kf_i]
        return fold['train_ids'], fold['dev_ids'], fold['test_ids']

    def get_index_data(self, ids, sep_token="[SEP]"):
        # ``sep_token`` is accepted for interface compatibility; unused here.
        return self.df[self.df[self.index_col].isin(ids)].copy()

    def get_data(self, kf_i, sep_token="[SEP]"):
        train_ids, dev_ids, test_ids = self.get_data_index(kf_i=kf_i)
        df_train = self.get_index_data(train_ids, sep_token=sep_token)
        df_dev = self.get_index_data(dev_ids, sep_token=sep_token)
        df_test = self.get_index_data(test_ids, sep_token=sep_token)
        return df_train, df_dev, df_test
class DFDataset(Dataset):
    """Torch dataset over a DataFrame with a 'text' column (and optional
    'label' column); yields transformers ``InputFeatures``."""

    def __init__(self, dataframe, tokenizer, max_len, multi_label=False, num_labels=1):
        dataframe.index = list(range(len(dataframe)))
        # Inference-time frames may lack labels: fill in dummy targets.
        if 'label' not in dataframe.columns:
            if multi_label:
                dataframe['label'] = [[0] * num_labels] * dataframe.shape[0]
            else:
                dataframe['label'] = 0
        self.len = len(dataframe)
        self.data = dataframe
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.multi_label = multi_label

    def __getitem__(self, index):
        raw_text = str(self.data.text[index])
        # Exactly one "[SEP]" marks a sentence-pair example.
        if raw_text.count("[SEP]") == 1:
            text1, text2 = raw_text.split("[SEP]")
        else:
            text1, text2 = raw_text, None
        encoded = self.tokenizer.encode_plus(
            text1,
            text2,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',
            return_token_type_ids=True,
            truncation=True
        )
        label = self.data.label[index]
        if self.multi_label:
            # Multi-label target: list of floats (may arrive serialized
            # as a string, e.g. "[0, 1, 0]").
            if type(label) == str:
                label = eval(label)
            label = [float(x) for x in label]
        else:
            # Single-label target: plain int class id.
            label = int(label)
        return InputFeatures(input_ids=encoded['input_ids'],
                             attention_mask=encoded['attention_mask'],
                             token_type_ids=encoded['token_type_ids'],
                             label=label)

    def __len__(self):
        return len(self.data)
| 7,870 | 28.927757 | 109 | py |
DialogID | DialogID-main/src/auto_text_classifier/atc/utils/hf_train.py | import logging
import math
import os
import re
import shutil
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import random
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler
from tqdm.auto import tqdm, trange
from transformers.data.data_collator import DataCollator,DefaultDataCollator
from transformers.data.processors.utils import InputFeatures
from transformers.modeling_utils import PreTrainedModel
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
EvalPrediction,
PredictionOutput,
TrainOutput,
)
from atc.utils.hf_training_args import TrainingArguments
# Optional NVIDIA Apex dependency for mixed-precision (fp16) training.
try:
    from apex import amp
    _has_apex = True
except ImportError:
    _has_apex = False
def is_apex_available():
    # True when NVIDIA Apex (amp) could be imported.
    return _has_apex
# Optional torch_xla dependency for TPU support.
try:
    import torch_xla.core.xla_model as xm
    _has_tpu = True
except ImportError:
    _has_tpu = False
def is_tpu_available():
    # True when torch_xla could be imported (note: is_torch_tpu_available
    # below is hard-coded to False and is what the Trainer actually checks).
    return _has_tpu
# Optional Weights & Biases integration: enabled only when wandb is
# installed, an API key is configured, and WANDB_DISABLED is unset.
try:
    import wandb
    wandb.ensure_configured()
    if wandb.api.api_key is None:
        _has_wandb = False
        wandb.termwarn("W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.")
    else:
        _has_wandb = False if os.getenv("WANDB_DISABLED") else True
except ImportError:
    _has_wandb = False
def is_wandb_available():
    # True when wandb is importable, logged in, and not disabled via env.
    return _has_wandb
def set_seed(seed: int):
    """Seed the python, numpy and torch (all CUDA devices) RNGs so runs
    are reproducible."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def is_torch_tpu_available():
    """Report TPU availability to the Trainer."""
    # Deliberately hard-coded off in this fork, regardless of whether
    # torch_xla is importable (see the _has_tpu probe above).
    return False
# Bind the optional backends now that the availability probes exist.
if is_apex_available():
    from apex import amp
if is_torch_tpu_available():
    # Dead in practice: is_torch_tpu_available() is hard-coded to False above.
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
# Prefer torch's native tensorboard writer; fall back to tensorboardX.
try:
    from torch.utils.tensorboard import SummaryWriter
    _has_tensorboard = True
except ImportError:
    try:
        from tensorboardX import SummaryWriter
        _has_tensorboard = True
    except ImportError:
        _has_tensorboard = False
def is_tensorboard_available():
    # True when either torch.utils.tensorboard or tensorboardX imported.
    return _has_tensorboard
if is_wandb_available():
    import wandb
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Parameters:
        local_rank (:obj:`int`): The rank of the local process.
    """
    # Non-master ranks block here until rank 0 has finished the guarded work.
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    # Rank 0 hits the barrier after its work, releasing the waiting ranks.
    if local_rank == 0:
        torch.distributed.barrier()
class SequentialDistributedSampler(Sampler):
    """
    Distributed sampler that hands each replica one contiguous, ordered
    slice of the dataset indices.

    Used only for eval/predict (no training), so model params never need to
    sync; indices are still padded (by wrapping around) so every replica
    receives exactly the same number of samples, which makes gathering or
    reducing the per-replica outputs at the end of the loop trivial.
    """

    def __init__(self, dataset, num_replicas=None, rank=None):
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        # Per-replica sample count, rounded up so the split is even.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        indices = list(range(len(self.dataset)))
        # Pad by wrapping so total_size divides evenly across replicas.
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # This replica's contiguous slice.
        start = self.rank * self.num_samples
        replica_indices = indices[start : start + self.num_samples]
        assert len(replica_indices) == self.num_samples
        return iter(replica_indices)

    def __len__(self):
        return self.num_samples
def make_weights_for_balanced_classes(datapoints, nclasses):
    """
    Compute one sampling weight per datapoint, inversely proportional to its
    class frequency, for use with ``WeightedRandomSampler``.

    Parameters:
        datapoints: dataset whose items are either ``InputFeatures`` (label
            on ``.label``) or indexable pairs (label at position 1)
        nclasses: total number of classes
    Returns:
        weight: list of per-datapoint floats; classes with zero examples
            contribute weight 0.0
    """
    def _label_of(item):
        # InputFeatures carries the label as an attribute; tuple-style
        # datapoints carry it at position 1.
        return item.label if isinstance(item, InputFeatures) else item[1]

    # Count examples per class.
    count = [0] * nclasses
    for i in range(len(datapoints)):
        count[_label_of(datapoints.__getitem__(i))] += 1
    weight_per_class = [0.0] * nclasses
    N = float(sum(count))
    for i in range(nclasses):
        weight_per_class[i] = 0.0 if count[i] == 0 else N / float(count[i])
    weight = [0] * len(datapoints)
    for idx in range(len(datapoints)):
        val = datapoints.__getitem__(idx)
        # BUG FIX: the original tested ``isinstance(item, InputFeatures)``
        # here, reusing the stale loop variable from the counting pass, so
        # the type check ran against the wrong object.
        weight[idx] = weight_per_class[_label_of(val)]
    return weight
def get_weighted_random_sampler(dataset):
    '''
    Build a WeightedRandomSampler that rebalances classes: each datapoint is
    drawn with probability inversely proportional to its class frequency.

    Assumes ``dataset`` exposes a ``get_labels`` method and raises if it does
    not -- supporting other dataset types would require modifying this.
    '''
    labels = dataset.get_labels()
    class_weights = make_weights_for_balanced_classes(dataset, len(labels))
    weights = torch.DoubleTensor(class_weights)
    return torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
def get_tpu_sampler(dataset: Dataset):
    # Single TPU core: plain random sampling; otherwise shard across cores.
    world_size = xm.xrt_world_size()
    if world_size <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=world_size, rank=xm.get_ordinal())
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch,
optimized for 🤗 Transformers.
Parameters:
model (:class:`~transformers.PreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TrainingArguments`):
The arguments to tweak training.
data_collator (:obj:`DataCollator`, `optional`, defaults to :func:`~transformers.default_data_collator`):
The function to use to from a batch from a list of elements of :obj:`train_dataset` or
:obj:`eval_dataset`.
train_dataset (:obj:`Dataset`, `optional`):
The dataset to use for training.
eval_dataset (:obj:`Dataset`, `optional`):
The dataset to use for evaluation.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
tb_writer (:obj:`SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`):
A tuple containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
"""
model: PreTrainedModel
args: TrainingArguments
data_collator: DataCollator
train_dataset: Optional[Dataset]
eval_dataset: Optional[Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
tb_writer: Optional["SummaryWriter"] = None
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None
global_step: Optional[int] = None
epoch: Optional[float] = None
    def __init__(
        self,
        model: PreTrainedModel,
        args: TrainingArguments,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        prediction_loss_only=False,
        tb_writer: Optional["SummaryWriter"] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
    ):
        """Initialize the Trainer: move the model onto the target device and
        wire up datasets, collator, metrics, logging backends and the seed."""
        self.model = model.to(args.device)
        self.args = args
        # Early-stopping patience is meaningless without in-training eval.
        if self.args.patience > 0 and not self.args.evaluate_during_training:
            raise ValueError("Patience requires evaluate_during_training.")
        if data_collator is not None:
            self.data_collator = data_collator
        else:
            self.data_collator = DefaultDataCollator()
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.compute_metrics = compute_metrics
        self.prediction_loss_only = prediction_loss_only
        self.optimizers = optimizers
        if tb_writer is not None:
            self.tb_writer = tb_writer
        elif is_tensorboard_available() and self.is_world_master():
            # Only the master process writes TensorBoard logs.
            self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
        if not is_tensorboard_available():
            logger.warning(
                "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
            )
        if is_wandb_available():
            self._setup_wandb()
        else:
            logger.info(
                "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
                "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
            )
        set_seed(self.args.seed)
        # Create output directory if needed
        if self.is_world_master():
            os.makedirs(self.args.output_dir, exist_ok=True)
        if is_torch_tpu_available():
            # Set an xla_device flag on the model's config.
            # We'll find a more elegant and not need to do this in the future.
            self.model.config.xla_device = True
        # Backwards compatibility: wrap deprecated collators that expose
        # ``collate_batch`` instead of being directly callable.
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            self.data_collator = self.data_collator.collate_batch
            warnings.warn(
                (
                    "The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
                    + "with a `collate_batch` are deprecated and won't be supported in a future version."
                ),
                FutureWarning,
            )
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
if is_torch_tpu_available():
train_sampler = get_tpu_sampler(self.train_dataset)
else:
if self.args.use_weighted_random_sampling:
train_sampler = get_weighted_random_sampler(self.train_dataset)
else:
train_sampler = (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
data_loader = DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator
)
return data_loader
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Parameters:
eval_dataset (:obj:`Dataset`, `optional`): If provided, will override `self.eval_dataset`.
Returns:
the evaluation :class:`~torch.utils.data.DataLoader`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(eval_dataset)
else:
sampler = SequentialSampler(eval_dataset)
data_loader = DataLoader(
eval_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator
)
return data_loader
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Parameters:
test_dataset (obj:`Dataset`): The test dataset to use.
"""
# We use the same batch_size as for eval.
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(test_dataset)
else:
sampler = SequentialSampler(test_dataset)
data_loader = DataLoader(
test_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator
)
return data_loader
def get_optimizers(
self, num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or override this method in a subclass.
"""
if self.optimizers is not None:
return self.optimizers
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
)
return optimizer, scheduler
    def _setup_wandb(self):
        """
        Setup the optional Weights & Biases (`wandb`) integration.

        One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
        You can also override the following environment variables:
        Environment:
            WANDB_WATCH:
                (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
                or "all" to log gradients and parameters
            WANDB_PROJECT:
                (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
            WANDB_DISABLED:
                (Optional): boolean - defaults to false, set to "true" to disable wandb entirely
        """
        # Only the master process initializes the wandb run.
        if self.is_world_master():
            logger.info(
                'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
            )
            wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
            # keep track of model topology and gradients, unsupported on TPU
            if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
                wandb.watch(
                    self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.logging_steps)
                )
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its Dataset.
"""
return len(dataloader.dataset)
    def train(self, model_path: Optional[str] = None):
        """
        Main training entry point.

        Runs the full epoch/step loop: gradient accumulation, optional fp16
        (apex), multi-GPU / distributed / TPU dispatch, periodic logging,
        in-training evaluation with patience-based early stopping (saving the
        best model), and periodic checkpointing.

        Parameters:
            model_path (:obj:`str`, `optional`):
                Local path to the model if the model to train has been instantiated from a local path. If present,
                training will resume from the optimizer/scheduler states loaded here.
        """
        train_dataloader = self.get_train_dataloader()
        # Derive total optimization steps and epoch count; max_steps > 0
        # overrides num_train_epochs.
        if self.args.max_steps > 0:
            t_total = self.args.max_steps
            num_train_epochs = (
                self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
            )
        else:
            t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
            num_train_epochs = self.args.num_train_epochs
        optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
        # Check if saved optimizer or scheduler states exist
        if (
            model_path is not None
            and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
            )
            scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
        model = self.model
        if self.args.fp16:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.fp16_opt_level)
        # multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=True,
            )
        if self.tb_writer is not None:
            self.tb_writer.add_text("args", self.args.to_json_string())
            self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={})
        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
        else:
            total_train_batch_size = (
                self.args.train_batch_size
                * self.args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", self.num_examples(train_dataloader))
        logger.info(" Num Epochs = %d", num_train_epochs)
        logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
        logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info(" Total optimization steps = %d", t_total)
        self.global_step = 0
        self.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path is not None:
            # set global_step to global_step of last saved checkpoint from model path
            try:
                self.global_step = int(model_path.split("-")[-1].split("/")[0])
                epochs_trained = self.global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = self.global_step % (
                    len(train_dataloader) // self.args.gradient_accumulation_steps
                )
                logger.info(" Continuing training from checkpoint, will skip to saved global_step")
                logger.info(" Continuing training from epoch %d", epochs_trained)
                logger.info(" Continuing training from global step %d", self.global_step)
                logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
            except ValueError:
                self.global_step = 0
                logger.info(" Starting fine-tuning.")
        tr_loss = 0.0
        logging_loss = 0.0
        # Early-stopping (patience) bookkeeping.
        patience_best_eval_loss = None
        patience_evals_without_improvement = 0
        patience_should_stop = False
        model.zero_grad()
        train_iterator = trange(
            epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master()
        )
        for epoch in train_iterator:
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                    self.args.device
                )
                epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master())
            else:
                epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master())
            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                tr_loss += self._training_step(model, inputs, optimizer)
                # Optimizer step only on accumulation boundaries (or at the
                # end of a too-short epoch).
                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    len(epoch_iterator) <= self.args.gradient_accumulation_steps
                    and (step + 1) == len(epoch_iterator)
                ):
                    if self.args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                    if is_torch_tpu_available():
                        xm.optimizer_step(optimizer)
                    else:
                        optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    self.global_step += 1
                    self.epoch = epoch + (step + 1) / len(epoch_iterator)
                    if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
                        self.global_step == 1 and self.args.logging_first_step
                    ):
                        logs: Dict[str, float] = {}
                        logs["loss"] = (tr_loss - logging_loss) / self.args.logging_steps
                        # backward compatibility for pytorch schedulers
                        logs["learning_rate"] = (
                            scheduler.get_last_lr()[0]
                            if version.parse(torch.__version__) >= version.parse("1.4")
                            else scheduler.get_lr()[0]
                        )
                        logging_loss = tr_loss
                        self._log(logs)
                    if self.args.evaluate_during_training and self.global_step % self.args.eval_steps == 0:
                        results = self.evaluate()
                        if self.args.patience > 0:
                            # Keep track of best loss to determine if we should stop early
                            eval_loss = results["eval_loss"]
                            if not patience_best_eval_loss or eval_loss < patience_best_eval_loss:
                                patience_evals_without_improvement = 0
                                patience_best_eval_loss = eval_loss
                                # New best: persist it separately from checkpoints.
                                self.save_model(os.path.join(self.args.output_dir,"best_model"))
                                logger.info(
                                    f"Save the best model eval loss is {patience_best_eval_loss}"
                                )
                            else:
                                patience_evals_without_improvement += 1
                                if patience_evals_without_improvement >= self.args.patience:
                                    patience_should_stop = True
                                    logger.info(
                                        f"Patience threshold ({self.args.patience}) exceeded, stopping training"
                                    )
                    if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                        # In all cases (even distributed/parallel), self.model is always a reference
                        # to the model we want to save.
                        if hasattr(model, "module"):
                            assert model.module is self.model
                        else:
                            assert model is self.model
                        # Save model checkpoint
                        output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")
                        self.save_model(output_dir)
                        if self.is_world_master():
                            self._rotate_checkpoints()
                        if is_torch_tpu_available():
                            xm.rendezvous("saving_optimizer_states")
                            xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        elif self.is_world_master():
                            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                if (self.args.max_steps > 0 and self.global_step > self.args.max_steps) or patience_should_stop:
                    epoch_iterator.close()
                    break
            if (self.args.max_steps > 0 and self.global_step > self.args.max_steps) or patience_should_stop:
                train_iterator.close()
                break
            if self.args.tpu_metrics_debug or self.args.debug:
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
        if self.tb_writer:
            self.tb_writer.close()
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        return TrainOutput(self.global_step, tr_loss / self.global_step)
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
    """Dispatch `logs` to TensorBoard, wandb and the console.

    Adds the current epoch to the payload, forwards scalar values to
    TensorBoard (non-scalars are dropped with a warning), mirrors the payload
    to Weights & Biases on the master process, and finally echoes everything
    (plus the global step) either through `iterator` or to stdout.

    Parameters:
        logs: metric name -> value mapping to record.
        iterator: optional tqdm progress bar to write through, so console
            output does not corrupt the progress display.
    """
    if self.epoch is not None:
        logs["epoch"] = self.epoch
    if self.global_step is None:
        # when logging evaluation metrics without training
        self.global_step = 0
    if self.tb_writer:
        for k, v in logs.items():
            if isinstance(v, (int, float)):
                self.tb_writer.add_scalar(k, v, self.global_step)
            else:
                logger.warning(
                    "Trainer is attempting to log a value of "
                    '"%s" of type %s for key "%s" as a scalar. '
                    "This invocation of Tensorboard's writer.add_scalar() "
                    "is incorrect so we dropped this attribute.",
                    v,
                    type(v),
                    k,
                )
        self.tb_writer.flush()
    if is_wandb_available():
        if self.is_world_master():
            wandb.log(logs, step=self.global_step)
    output = {**logs, **{"step": self.global_step}}
    if iterator is not None:
        # BUG FIX: tqdm.write() requires a string; passing the dict directly
        # raises TypeError when this branch is taken.
        iterator.write(str(output))
    else:
        print(output)
def _training_step(
    self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer
) -> float:
    """Run one forward/backward pass and return the (scaled) loss as a float.

    NOTE(review): `inputs` is mutated in place (tensors moved onto
    `self.args.device`, a `mems` entry injected) — confirm callers do not
    reuse the dict afterwards.
    """
    model.train()
    for k, v in inputs.items():
        if isinstance(v, torch.Tensor):
            # Move every tensor input onto the configured training device.
            inputs[k] = v.to(self.args.device)
    if self.args.past_index >= 0 and self._past is not None:
        # Feed cached hidden states (TransformerXL/XLNet-style) back to the model.
        inputs["mems"] = self._past
    # Our model outputs do not work with DataParallel, so forcing return tuple.
    # if self.args.n_gpu > 1:
    #     inputs["return_tuple"] = True
    outputs = model(**inputs)
    loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
    if self.args.past_index >= 0:
        # Stash the new past state for the next step.
        self._past = outputs[self.args.past_index]
    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training
    if self.args.gradient_accumulation_steps > 1:
        # Scale so gradients accumulated over N micro-steps average correctly.
        loss = loss / self.args.gradient_accumulation_steps
    if self.args.fp16:
        # apex AMP: loss scaling to avoid fp16 underflow during backward.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()
    return loss.item()
def is_local_master(self) -> bool:
    """True on the rank-0 process of this machine (or on any non-distributed run)."""
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=True)
    return self.args.local_rank in (-1, 0)
def is_world_master(self) -> bool:
    """
    This will be True only in one process, even in distributed mode,
    even when training on multiple machines.
    """
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=False)
    rank = self.args.local_rank
    return rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
    """
    Will save the model, so you can reload it using :obj:`from_pretrained()`.
    Will only save from the world_master process (unless in TPUs).
    """
    if is_torch_tpu_available():
        self._save_tpu(output_dir)
        return
    if self.is_world_master():
        self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
    """TPU checkpointing: only the master ordinal touches the filesystem."""
    target_dir = self.args.output_dir if output_dir is None else output_dir
    logger.info("Saving model checkpoint to %s", target_dir)
    if xm.is_master_ordinal():
        os.makedirs(target_dir, exist_ok=True)
        # Persist the training arguments alongside the weights.
        torch.save(self.args, os.path.join(target_dir, "training_args.bin"))
    # Save a trained model and configuration via `save_pretrained()` so it
    # can be reloaded later with `from_pretrained()`.
    if not isinstance(self.model, PreTrainedModel):
        raise ValueError("Trainer.model appears to not be a PreTrainedModel")
    # Sync all ordinals before writing the checkpoint.
    xm.rendezvous("saving_checkpoint")
    self.model.save_pretrained(target_dir)
def _save(self, output_dir: Optional[str] = None):
    """Write model weights, config, training args and the raw module to disk."""
    target_dir = self.args.output_dir if output_dir is None else output_dir
    os.makedirs(target_dir, exist_ok=True)
    logger.info("Saving model checkpoint to %s", target_dir)
    # Save a trained model and configuration via `save_pretrained()`;
    # it can then be reloaded with `from_pretrained()`.
    if not isinstance(self.model, PreTrainedModel):
        raise ValueError("Trainer.model appears to not be a PreTrainedModel")
    self.model.save_pretrained(target_dir)
    # Good practice: save your training arguments together with the trained model
    torch.save(self.args, os.path.join(target_dir, "training_args.bin"))
    # Also save the entire model object:
    # https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-load-entire-model
    torch.save(self.model, os.path.join(target_dir, "raw_model.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
    """Return checkpoint directories under `output_dir`, oldest first.

    Ordering key is the step number embedded in the directory name, or the
    file modification time when `use_mtime` is set.
    """
    pattern = re.compile(f".*{checkpoint_prefix}-([0-9]+)")
    keyed_paths = []
    for candidate in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*"):
        path = str(candidate)
        if use_mtime:
            keyed_paths.append((os.path.getmtime(path), path))
            continue
        match = pattern.match(path)
        if match and match.groups():
            keyed_paths.append((int(match.groups()[0]), path))
    return [path for _, path in sorted(keyed_paths)]
def _rotate_checkpoints(self, use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most `save_total_limit` remain."""
    limit = self.args.save_total_limit
    if limit is None or limit <= 0:
        return
    # Check if we should delete older checkpoint(s)
    sorted_ckpts = self._sorted_checkpoints(use_mtime=use_mtime)
    if len(sorted_ckpts) <= limit:
        return
    surplus = max(0, len(sorted_ckpts) - limit)
    for stale in sorted_ckpts[:surplus]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def evaluate(
    self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None,
) -> Dict[str, float]:
    """
    Run evaluation and returns metrics.
    The calling script will be responsible for providing a method to compute metrics, as they are
    task-dependent (pass it to the init :obj:`compute_metrics` argument).
    Parameters:
        eval_dataset (:obj:`Dataset`, `optional`):
            Pass a dataset if you wish to override :obj:`self.eval_dataset`.
        prediction_loss_only (:obj:`bool`, `optional`):
            If set, overrides :obj:`self.prediction_loss_only` for this call only.
    Returns:
        A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
    """
    eval_dataloader = self.get_eval_dataloader(eval_dataset)
    # BUG FIX: `prediction_loss_only` was accepted but never forwarded, so the
    # argument was silently ignored; pass it through to the prediction loop
    # (which falls back to self.prediction_loss_only when it is None).
    output = self._prediction_loop(
        eval_dataloader, description="Evaluation", prediction_loss_only=prediction_loss_only
    )
    self._log(output.metrics)
    if self.args.tpu_metrics_debug or self.args.debug:
        # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
        xm.master_print(met.metrics_report())
    return output.metrics
def predict(self, test_dataset: Dataset) -> PredictionOutput:
    """
    Run prediction on :obj:`test_dataset` and return predictions plus optional metrics.
    If the dataset carries labels, metrics are computed exactly as in :obj:`evaluate()`.
    Parameters:
        test_dataset (:obj:`Dataset`):
            Dataset to run the predictions on.
    Returns:
        `NamedTuple` with fields:
            predictions (:obj:`np.ndarray`): the predictions on :obj:`test_dataset`;
            label_ids (:obj:`np.ndarray`, `optional`): labels, if the dataset had any;
            metrics (:obj:`Dict[str, float]`, `optional`): metrics, if labels were present.
    """
    test_loader = self.get_test_dataloader(test_dataset)
    return self._prediction_loop(test_loader, description="Prediction")
def _prediction_loop(
    self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
    """
    Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
    Works both with or without labels.
    """
    # Fall back to the trainer-wide setting when no per-call override is given.
    prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
    model = self.model
    # multi-gpu eval
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    else:
        model = self.model
    # Note: in torch.distributed mode, there's no point in wrapping the model
    # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
    batch_size = dataloader.batch_size
    logger.info("***** Running %s *****", description)
    logger.info(" Num examples = %d", self.num_examples(dataloader))
    logger.info(" Batch size = %d", batch_size)
    eval_losses: List[float] = []
    preds: Optional[torch.Tensor] = None
    label_ids: Optional[torch.Tensor] = None
    model.eval()
    if is_torch_tpu_available():
        dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
    if self.args.past_index >= 0:
        # Cached hidden state threaded through batches (TransformerXL/XLNet-style).
        past = None
    for inputs in tqdm(dataloader, desc=description):
        # A batch "has labels" when any supported label key is present.
        has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.args.device)
        if self.args.past_index >= 0:
            inputs["mems"] = past
        # Our model outputs do not work with DataParallel, so forcing return tuple.
        # if self.args.n_gpu > 1:
        #     inputs["return_tuple"] = True
        with torch.no_grad():
            outputs = model(**inputs)
            if has_labels:
                # With labels, outputs are (loss, logits, ...).
                step_eval_loss, logits = outputs[:2]
                eval_losses += [step_eval_loss.mean().item()]
            else:
                logits = outputs[0]
            if self.args.past_index >= 0:
                # Without labels there is no loss slot, so the past state shifts one index left.
                past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
        if not prediction_loss_only:
            if preds is None:
                preds = logits.detach()
            else:
                preds = torch.cat((preds, logits.detach()), dim=0)
            if inputs.get("labels") is not None:
                if label_ids is None:
                    label_ids = inputs["labels"].detach()
                else:
                    label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
    if self.args.local_rank != -1:
        # In distributed mode, concatenate all results from all nodes:
        if preds is not None:
            preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
        if label_ids is not None:
            label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
    elif is_torch_tpu_available():
        # tpu-comment: Get all predictions and labels from all worker shards of eval dataset
        if preds is not None:
            preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
        if label_ids is not None:
            label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
    # Finally, turn the aggregated tensors into numpy arrays.
    if preds is not None:
        preds = preds.cpu().numpy()
    if label_ids is not None:
        label_ids = label_ids.cpu().numpy()
    if self.compute_metrics is not None and preds is not None and label_ids is not None:
        metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
    else:
        metrics = {}
    if len(eval_losses) > 0:
        metrics["eval_loss"] = np.mean(eval_losses)
    # Prefix all keys with eval_
    for key in list(metrics.keys()):
        if not key.startswith("eval_"):
            metrics[f"eval_{key}"] = metrics.pop(key)
    return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
    """All-gather `tensor` across ranks, concatenate, and trim sampler padding."""
    assert self.args.local_rank != -1
    world_size = torch.distributed.get_world_size()
    gathered = [tensor.clone() for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor)
    # truncate the dummy elements added by SequentialDistributedSampler
    return torch.cat(gathered, dim=0)[:num_total_examples]
DialogID | DialogID-main/src/auto_text_classifier/atc/utils/hf_training_args.py | import dataclasses
import json
import logging
import os
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple
from transformers.file_utils import cached_property, is_torch_available, torch_required
def is_torch_tpu_available():
    """Hard-coded stub: this fork never runs on TPU, so XLA checks short-circuit."""
    tpu_supported = False
    return tpu_supported
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.getLogger(__name__)
def default_logdir() -> str:
    """
    Same default as PyTorch: runs/<timestamp>_<hostname>.
    """
    import socket
    from datetime import datetime
    stamp = datetime.now().strftime("%b%d_%H-%M-%S")
    run_name = f"{stamp}_{socket.gethostname()}"
    return os.path.join("runs", run_name)
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts
**which relate to the training loop itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not.
do_eval (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run evaluation on the dev set or not.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not.
evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run evaluation during training at each logging step or not.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):
Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform.
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of updates steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to avoid using CUDA even when it is available.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`, defaults to 1000):
Number of update steps between two evaluations.
past_index (:obj:`int`, `optional`, defaults to -1):
Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
evaluate_during_training: bool = field(
default=False, metadata={"help": "Run evaluation during training at each logging step."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log and eval the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=1000, metadata={"help": "Run an evaluation every X steps."})
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
patience: int = field(
default=-1,
metadata={
"help": (
"If > 0: stops training after evaluating this many times consecutively with non-decreasing loss."
"Requires evaluate_during_training."
)
},
)
use_weighted_random_sampling: bool = field(
    default=False,
    metadata={
        # BUG FIX: the original parenthesized value had trailing commas after
        # each string, producing a TUPLE of strings instead of one implicitly
        # concatenated help string (inconsistent with every other field and
        # broken for argparse's help rendering).
        "help": (
            "For classification task, reweight sampling mechanism so classes are evenly sampled. "
            "Not compatible with distributed sampling or TPU for now."
        )
    },
)
@property
def train_batch_size(self) -> int:
    """
    The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
    """
    if self.per_gpu_train_batch_size:
        logger.warning(
            "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
            "version. Using `--per_device_train_batch_size` is preferred."
        )
    # Deprecated per-GPU flag wins when set; scale by device count.
    per_device = self.per_gpu_train_batch_size or self.per_device_train_batch_size
    return per_device * max(1, self.n_gpu)
@property
def eval_batch_size(self) -> int:
    """
    The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
    """
    if self.per_gpu_eval_batch_size:
        logger.warning(
            "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
            "version. Using `--per_device_eval_batch_size` is preferred."
        )
    # Deprecated per-GPU flag wins when set; scale by device count.
    per_device = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
    return per_device * max(1, self.n_gpu)
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
    """Resolve the (device, n_gpu) pair once; cached for the process lifetime."""
    logger.info("PyTorch: setting up devices")
    if self.no_cuda:
        device = torch.device("cpu")
        n_gpu = 0
    elif is_torch_tpu_available():
        device = xm.xla_device()
        n_gpu = 0
    elif self.local_rank == -1:
        # if n_gpu is > 1 we'll use nn.DataParallel.
        # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
        # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
        # trigger an error that a device index is missing. Index 0 takes into account the
        # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
        # will use the first GPU in that env, i.e. GPU#1
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        # Here, we'll use torch.distributed.
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend="nccl")
        device = torch.device("cuda", self.local_rank)
        n_gpu = 1
    if device.type == "cuda":
        torch.cuda.set_device(device)
    return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
    """
    The device used by this process.
    """
    dev, _ = self._setup_devices
    return dev
@property
@torch_required
def n_gpu(self):
    """
    The number of GPUs used by this process.
    Note:
        This will only be greater than one when you have multiple GPUs available but are not using distributed
        training. For distributed training, it will always be 1.
    """
    _, gpu_count = self._setup_devices
    return gpu_count
def to_json_string(self):
    """
    Serializes this instance to a JSON string.
    """
    payload = dataclasses.asdict(self)
    return json.dumps(payload, indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
    """
    Sanitized serialization to use with TensorBoard's hparams:
    any value that is not a plain scalar (or Tensor) is stringified.
    """
    raw = dataclasses.asdict(self)
    valid_types = [bool, int, float, str]
    if is_torch_available():
        valid_types.append(torch.Tensor)
    sanitized = {}
    for key, value in raw.items():
        sanitized[key] = value if type(value) in valid_types else str(value)
    return sanitized
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root importable so autodoc can locate the package.
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'baryrat'
copyright = '2020-2022, Clemens Hofreither'
author = 'Clemens Hofreither'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# The document that contains the root toctree directive.
master_doc = 'index'
| 2,008 | 33.050847 | 79 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/train_on_simulation.py | from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
def frame2mat(df, num_u, num_i):
    """Convert an interaction frame (uidx, iidx columns) into a binary
    user-by-item CSR matrix of shape (num_u, num_i)."""
    rows = df.uidx
    cols = df.iidx
    vals = np.ones(len(rows))
    return sp.csr_matrix((vals, (rows, cols)), shape=(num_u, num_i))
def main(args: Namespace):
ratings = pd.read_feather(os.path.join(args.data_path, args.data_name + '_smaple'))
user_num, item_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
#df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_full.feather'))
tr_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
val_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
te_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
if args.tune_mode:
tr_df = pd.concate([tr_df, val_df])
te_df = te_df
else:
tr_df = tr_df
te_df = val_df
past_hist = tr_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
item_cnt_dict = tr_df.groupby('iidx').count().uidx.to_dict()
item_cnt = np.array([item_cnt_dict.get(iidx, 0) for iidx in range(item_num)])
logger.info(f'test data size: {te_df.shape}')
dim=args.dim
rel_factor = FactorModel(user_num, item_num, dim)
PATH = os.path.join(args.sim_path, f'{args.prefix}_rel.pt')
rel_factor.load_state_dict(torch.load(PATH))
rel_factor.eval()
train_expo_factor = FactorModel(user_num, item_num, dim)
PATH = os.path.join(args.sim_path, f'{args.prefix}_expo.pt')
train_expo_factor.load_state_dict(torch.load(PATH))
train_expo_factor.eval()
train_expo_factor = NoiseFactor(train_expo_factor, args.dim)
train_expo_factor = train_expo_factor.to(torch.device(f'cuda:{args.cuda_idx}'))
train_expo_factor.load_state_dict(torch.load(os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt')))
train_expo_factor.eval()
expo_factor = FactorModel(user_num, item_num, dim)
PATH = os.path.join(args.sim_path, f'{args.prefix}_expo_bs.pt')
expo_factor.load_state_dict(torch.load(PATH))
expo_factor.eval()
rating_model = RatingEstimator(user_num, item_num, rel_factor)
expo_model = ClassRecommender(user_num, item_num, expo_factor)
tr_mat = frame2mat(tr_df, user_num, item_num)
val_mat = frame2mat(val_df, user_num, item_num)
choices = args.models
logging.info(f'Running {choices}')
def get_model(model_str, user_num, item_num, factor_num):
    """Factory: map a model keyword ('mlp' | 'gmf' | 'ncf') to a freshly
    constructed recommender backbone; raise for unknown keywords."""
    if model_str == 'ncf':
        return NCFModel(user_num, item_num, factor_num)
    if model_str == 'gmf':
        return FactorModel(user_num, item_num, factor_num)
    if model_str == 'mlp':
        return MLPRecModel(user_num, item_num, factor_num)
    raise NotImplementedError(f'{model_str} is not implemented')
logging.info('-------The Popularity model-------')
pop_factor = PopularModel(item_cnt)
pop_model = PopRecommender(pop_factor)
logger.info('unbiased eval for plian popular model on test')
unbiased_eval(user_num, item_num, te_df, pop_model, epsilon=args.epsilon,
rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
logger.info('-------The SVD model---------')
sv = SVDRecommender(tr_mat.shape[0], tr_mat.shape[1], dim)
logger.info(f'model with dimension {dim}')
sv.fit(tr_mat)
logger.info('un-biased eval for SVD model on test')
unbiased_eval(user_num, item_num, te_df, sv, epsilon=args.epsilon,
rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
    def complete_experiment(model_str, user_num, item_num, dim):
        """Train and (unbiased-)evaluate one backbone in four variants:
        plain, popularity-adjusted, mirror-adjusted, and oracle-adjusted.
        Reads tr_df/te_df/args/past_hist/... from the enclosing scope."""
        # Variant 1: plain model, no exposure adjustment.
        logging.info(f'-------The {model_str} model-------')
        base_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        base_model =ClassRecommender(user_num, item_num, base_factor)
        # NOTE(review): base model trains with hard-coded decay=1e-8 while the
        # adjusted variants use args.decay -- confirm this asymmetry is intended.
        base_model.fit(tr_df,
                       num_epochs=args.epoch,
                       cuda=args.cuda_idx,
                       decay=1e-8,
                       num_neg=args.num_neg,
                       past_hist=past_hist,
                       lr=args.lr)
        logger.info(f'unbiased eval for {model_str} model on test')
        unbiased_eval(user_num, item_num, te_df, base_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        # Variant 2: adjust negative sampling with the popularity model.
        logging.info(f'-------The {model_str} Pop Adjust model-------')
        pop_adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        pop_adjust_model = ClassRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        pop_adjust_model.fit(tr_df,
                             num_epochs=args.epoch,
                             cuda=args.cuda_idx,
                             decay=args.decay,
                             num_neg=args.num_neg,
                             past_hist=past_hist,
                             lr=args.lr)
        logger.info(f'unbiased eval for adjust {model_str} with popular model on test')
        unbiased_eval(user_num, item_num, te_df, pop_adjust_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del pop_adjust_factor
        # Variant 3: "mirror" adjustment -- use the plain model itself as the
        # exposure estimator for a fresh copy of the backbone.
        logging.info(f'-------The {model_str} Mirror Adjust model-------')
        adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        adjust_model = ClassRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1)
        adjust_model.fit(tr_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info(f'un-biased eval for {model_str} mirror adjusted model')
        unbiased_eval(user_num, item_num, te_df, adjust_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del adjust_factor
        # Variant 4: oracle adjustment with the true (simulation) exposure model.
        logger.info(f'-------The {model_str} Oracle Adjust model---------')
        oracle_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        oracle_model = ClassRecommender(user_num,
            item_num, oracle_factor, train_expo_factor, expo_thresh=0.1, expo_compound=args.p)
        oracle_model.fit(tr_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info('un-biased eval for oracle model on test')
        unbiased_eval(user_num, item_num, te_df, oracle_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del oracle_factor
    # Run the four-variant experiment for every requested non-GAN backbone.
    for model_str in choices:
        if model_str != 'acgan':
            complete_experiment(model_str, user_num, item_num, dim)
    # Adversarial-counterfactual training: f (recommender) vs g (exposure)
    # with the beta propensity head; g is warm-started for g_round_head epochs.
    if 'acgan' in choices:
        logger.info('-------The AC GAN model---------')
        f = get_model(args.f_model, user_num, item_num, dim)
        g = get_model(args.g_model, user_num, item_num, dim)
        beta = BetaModel(user_num=user_num, item_num=item_num)
        f_recommender = ClassRecommender(user_num, item_num, f)
        g_recommender = ClassRecommender(user_num, item_num, g)
        g_recommender.fit(tr_df,
                          num_epochs=args.g_round_head,
                          cuda=args.cuda_idx,
                          num_neg=args.num_neg,
                          past_hist=past_hist,
                          decay=args.decay,
                          lr=args.lr)
        # The two False flags mark f and g as non-sequential models here.
        ac_train_v3(f, False, g, False, beta, tr_df,
                    user_num=user_num,
                    item_num=item_num,
                    num_neg=args.num_neg,
                    past_hist=past_hist,
                    val_df=te_df,
                    rating_model=rating_model,
                    expo_model=expo_model,
                    num_epochs=args.epoch,
                    decay=args.decay,
                    cuda_idx=args.cuda_idx,
                    lr=args.lr,
                    g_weight=0.5,
                    expo_compound=args.p,
                    epsilon=args.epsilon)
        logger.info(f'eval on test with f_model ({args.f_model})')
        unbiased_eval(user_num, item_num, te_df, f_recommender, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        logger.info(f'eval on test with g_model ({args.g_model})')
        unbiased_eval(user_num, item_num, te_df, g_recommender, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
if __name__ == '__main__':
    # CLI entry point: parse arguments, configure logging, run the experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=16)
    parser.add_argument('--epsilon', type=float, default=4)
    parser.add_argument('--p', type=float, default=1)
    # FIX: epoch counts are integral; type=float produced a float num_epochs
    # downstream (e.g. anything iterating range(num_epochs) would break).
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--decay', type=float, default=1e-7)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--cuda_idx', type=int, default=0)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--tune_mode', action='store_true')
    # FIX: was type=str with an int default, so passing --num_neg on the
    # command line handed a *string* to the training code that expects an
    # integer negative-sample count.
    parser.add_argument('--num_neg', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--models',
                        default=['ncf', 'mlp', 'gmf', 'acgan'],
                        nargs='+',
                        help="input a list of ['ncf', 'mlp', 'gmf', 'acgan']")
    parser.add_argument('--f_model', type=str, default='mlp')
    parser.add_argument('--g_model', type=str, default='mlp')
    parser.add_argument('--g_round_head', type=int, default=5)
    args = parser.parse_args()
    ### set up logger: everything to a timestamped file, warnings+ to console.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler(f'log/{args.prefix}-{str(time.time())}.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(args)
main(args) | 11,375 | 43.787402 | 111 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/NCF_validation.py | from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
from ncf_utils import *
class DuckModel:
    """Adapter exposing a Keras-style ``predict`` API on top of a
    recommender that implements ``score(users, items)``."""

    def __init__(self, model):
        # Wrapped recommender; must provide score(list, list).
        self.model = model

    def predict(self, in_data, batch_size=100, verbose=0):
        """Score (user, item) pairs.

        ``in_data`` is a (user_array, item_array) pair; ``batch_size`` and
        ``verbose`` are accepted only for API compatibility and ignored.
        """
        user_arr, item_arr = in_data
        return self.model.score(user_arr.tolist(), item_arr.tolist())
# Load the NCF benchmark split (train matrix + leave-one-out test ratings
# with sampled negatives) and rebuild a train DataFrame from the sparse matrix.
dataset = Dataset('data/ncf_data/ml-1m')
train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
uidx, iidx = train.nonzero()
rating = np.ones_like(uidx).astype(np.float32)
# Synthetic timestamps: positions act as ordering placeholders.
ts = np.arange(rating.shape[0])
train_df = pd.DataFrame({'uidx': uidx, 'iidx': iidx, 'rating': rating, 'ts': ts})
# Per-user set of interacted items, used to avoid sampling seen positives.
past_hist = train_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
user_num, item_num = train_df.uidx.max() + 1, train_df.iidx.max() + 1
evaluation_threads = 1
factor_num = 32
K = 10  # cut-off for HR@K / NDCG@K
factor = NCFModel(user_num, item_num, factor_num)
recom = ClassRecommender(user_num, item_num, factor)
recom.fit(train_df,
          num_epochs=20,
          cuda=0,
          decay=1e-7,
          num_neg=4,
          past_hist=past_hist, batch_size=256,
          lr=0.01)
# Wrap so evaluate_model's Keras-style predict() calls hit recom.score().
duck_model = DuckModel(recom)
hit, ndcg = evaluate_model(duck_model, testRatings, testNegatives, K, evaluation_threads)
print(np.mean(hit), np.mean(ndcg)) | 1,725 | 28.254237 | 99 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/robust_simulation.py | """Script to generate recommendation data from simulation"""
import argparse
from argparse import Namespace
import os
import pandas as pd #type: ignore
import torch #type: ignore
import numpy as np #type: ignore
from scipy import sparse as sp #type: ignore
from tqdm import tqdm #type: ignore
from acgan.data import RatingData
from acgan.module import FactorModel, NoiseFactor
from acgan.recommender import ClassRecommender, RatingEstimator, BPRRecommender
from sklearn.model_selection import train_test_split
# Fix all RNG seeds and force deterministic cuDNN kernel selection so
# repeated simulation runs produce identical datasets.
torch.manual_seed(123)
np.random.seed(123)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main(args: Namespace):
    """Build a robustness simulation: fit relevance/exposure models on a
    user/item-truncated rating set, sample a train split from the (noisy)
    exposure model, then sample val/test from a re-estimated exposure model.
    """
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    # Truncate to the first u_limit users / i_limit items to keep the
    # full u x i simulation grid tractable.
    u_limit, i_limit = args.u_limit, args.i_limit
    ratings = ratings[(ratings.uidx < u_limit) & (ratings.iidx < i_limit)]
    ratings.reset_index(inplace=True)
    # Persist the truncated sample (filename keeps the original '_smaple' typo).
    ratings.to_feather(os.path.join(args.data_path, args.data_name + '_smaple'))
    u_num, i_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    print(f'u: {u_num}, i: {i_num}')
    #
    print('train rel model')
    rel_factor = FactorModel(u_num, i_num, args.dim)
    rating_features = list(zip(ratings.uidx, ratings.iidx, ratings.rating))
    rating_model = RatingEstimator(u_num, i_num, rel_factor)
    rating_model.fit(rating_features, cuda=0, num_epochs=args.epoch)
    #
    print('train expo model')
    expo_factor = FactorModel(u_num, i_num, args.dim)
    #expo_model = BPRRecommender(u_num, i_num, expo_factor)
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    full_mat = sp.csr_matrix((ratings.rating, (ratings.uidx, ratings.iidx)), shape=(u_num, i_num))
    print(full_mat.shape)
    expo_model.fit(ratings, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(rel_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_rel.pt'))
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo.pt'))
    print('get noise added expo model')
    expo_factor = NoiseFactor(expo_factor, args.dim, noise_ratio=args.noise_ratio)
    expo_factor = expo_factor.cuda()
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt'))
    # re-assign the expo model
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    sigmoid = lambda x: np.exp(x) / (1 + np.exp(x))
    # Enumerate the dense cartesian product of all (user, item) pairs.
    u_all = np.arange(u_num).repeat(i_num)
    i_all = np.arange(i_num).repeat(u_num).reshape(i_num, u_num).reshape(-1, order='F')
    est_rel = rating_model.score(u_all, i_all)
    est_click_prob = sigmoid(est_rel - args.epsilon)
    est_logits = expo_model.score(u_all, i_all)
    # Compound exponent p sharpens/flattens the exposure distribution.
    est_expo_prob = sigmoid(est_logits) ** args.p
    simu_size = len(est_click_prob)
    # An interaction is observed only if both clicked and exposed.
    click_event = np.random.random(simu_size) < est_click_prob
    expo_event = np.random.random(simu_size) < est_expo_prob
    valid = click_event * expo_event
    train_valid = valid
    print(f'total size: {len(valid)}, valid size: {valid.sum()}')
    out = {}
    out['uidx'] = u_all[valid]
    out['iidx'] = i_all[valid]
    out['click_prob'] = est_click_prob[valid]
    out['expo_prob'] = est_expo_prob[valid]
    # placeholder variable to train the testing exposure model
    out['rating'] = np.ones(out['click_prob'].size)
    out['ts'] = np.random.rand(out['click_prob'].size)
    train_df = pd.DataFrame(out)
    # Re-estimate exposure from the observed train split ("bootstrap" model).
    new_expo_factor = FactorModel(u_num, i_num, args.dim).cuda()
    new_expo_model = ClassRecommender(u_num, i_num, new_expo_factor)
    new_expo_model.fit(train_df, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(new_expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_bs.pt'))
    est_rel = rating_model.score(u_all, i_all)
    est_click_prob = sigmoid(est_rel - args.epsilon)
    est_logits = new_expo_model.score(u_all, i_all)
    # NOTE(review): `expo_prob` from the re-estimated model is computed but
    # never used -- the sampling below still draws against the stale
    # `est_expo_prob` from the noisy model, and `robu_out['expo_prob']`
    # records the stale values too. Confirm whether `expo_prob` was meant
    # to replace `est_expo_prob` here.
    expo_prob = sigmoid(est_logits) ** args.p
    simu_size = len(est_click_prob)
    click_event = np.random.random(simu_size) < est_click_prob
    expo_event = np.random.random(simu_size) < est_expo_prob
    # Exclude pairs already emitted into the train split.
    valid = click_event * expo_event * (~train_valid)
    robu_out = {}
    robu_out['uidx'] = u_all[valid]
    robu_out['iidx'] = i_all[valid]
    robu_out['click_prob'] = est_click_prob[valid]
    robu_out['expo_prob'] = est_expo_prob[valid]
    print(valid.sum())
    size = valid.sum()
    # placeholder variable to train the testing exposure model
    robu_out['rating'] = np.ones(size)
    robu_out['ts'] = np.random.rand(size)
    robu_df = pd.DataFrame(robu_out)
    # Split the held-out interactions 50/50 into validation and test.
    val_df, test_df = train_test_split(robu_df, test_size=0.5)
    train_df = train_df.reset_index(drop=True)
    print(f'train shape: {train_df.shape}')
    val_df = val_df.reset_index(drop=True)
    print(f'val shape: {val_df.shape}')
    test_df = test_df.reset_index(drop=True)
    print(f'test shape: {test_df.shape}')
    print(train_df.head())
    train_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
    val_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
    test_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
if __name__ == '__main__':
    # CLI entry point for the robustness simulation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=16)
    parser.add_argument('--epsilon', type=float, default=3)
    parser.add_argument('--p', type=float, default=2)
    # FIX: epoch counts are integral; type=float produced a float num_epochs
    # for the downstream fit() calls.
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--decay', type=float, default=1e-8)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--sample_sim', action='store_true')
    parser.add_argument('--item_sample_size', type=int, default=2000)
    parser.add_argument('--noise_ratio', type=float, default=1.0)
    parser.add_argument('--u_limit', type=int, default=500)
    parser.add_argument('--i_limit', type=int, default=1000)
    args = parser.parse_args()
main(args) | 6,198 | 43.92029 | 102 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/train_on_real.py | from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
def frame2mat(df, num_u, num_i):
    """Convert an interaction frame (uidx, iidx columns) into a binary
    user-item CSR matrix of shape (num_u, num_i)."""
    ones = np.ones(len(df.uidx))
    return sp.csr_matrix((ones, (df.uidx, df.iidx)), shape=(num_u, num_i))
def main(args: Namespace):
    """Train/evaluate recommenders on a real (pre-split) dataset; in tune
    mode evaluate on the validation split, otherwise fold val into train
    and evaluate on test."""
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    user_num, item_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    tr_df = pd.read_feather(os.path.join(args.data_path, 'train.feather'))
    val_df = pd.read_feather(os.path.join(args.data_path, 'val.feather'))
    te_df = pd.read_feather(os.path.join(args.data_path, 'test.feather'))
    if not args.tune_mode:
        tr_df = pd.concat([tr_df, val_df])
        te_df = te_df
    else:
        tr_df = tr_df
        te_df = val_df
    # Per-user set of seen items (for negative sampling) and item counts
    # (for the popularity baseline).
    past_hist = tr_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
    item_cnt_dict = tr_df.groupby('iidx').count().uidx.to_dict()
    item_cnt = np.array([item_cnt_dict.get(iidx, 0) for iidx in range(item_num)])
    # Per-user chronological item sequence for sequential (seq) models.
    hist = tr_df.groupby('uidx').apply(
        lambda x: list(zip(x.ts, x.iidx))).to_dict()
    for k in hist.keys():
        hist[k] = [x[1] for x in sorted(hist[k])]
    logger.info(f'test data size: {te_df.shape}')
    # No oracle relevance model on real data -- evaluation is biased.
    rating_model = None
    tr_mat = frame2mat(tr_df, user_num, item_num)
    choices = args.models
    logging.info(f'Running {choices}')
    # Flags: whether the AC-GAN f / g components are sequential models.
    acgan_config = [args.f_model == 'seq', args.g_model == 'seq']
    pop_factor = PopularModel(item_cnt)
    logging.info('-------The Popularity model-------')
    pop_model = PopRecommender(pop_factor)
    logger.info('biased eval for plian popular model on test')
    unbiased_eval(user_num, item_num, te_df, pop_model, past_hist=past_hist)
    logger.info('-------The SVD model---------')
    sv = SVDRecommender(tr_mat.shape[0], tr_mat.shape[1], args.dim)
    logger.info(f'model with dimension {args.dim}')
    sv.fit(tr_mat)
    logger.info('biased eval for SVD model on test')
    unbiased_eval(user_num, item_num, te_df, sv, past_hist=past_hist)
    #unbiased_eval(user_num, item_num, te_df, sv)
def get_model(model_str, user_num, item_num, factor_num, max_len=50, num_layer=2):
if model_str == 'mlp':
return MLPRecModel(user_num, item_num, factor_num)
elif model_str == 'gmf':
return FactorModel(user_num, item_num, factor_num)
elif model_str == 'ncf':
return NCFModel(user_num, item_num, factor_num)
elif model_str == 'seq':
return AttentionModel(user_num, item_num, args.dim, max_len=max_len, num_layer=num_layer)
else:
raise NotImplementedError(f'{model_str} is not implemented')
    def complete_experiment(model_str, user_num, item_num, dim, is_deep):
        """Train and evaluate one backbone in three variants: plain,
        popularity-adjusted, and mirror-adjusted. ``is_deep`` selects the
        DeepRecommender wrapper for sequential backbones."""
        # Variant 1: plain model.
        logging.info(f'-------The {model_str} model-------')
        base_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            base_model = DeepRecommender(user_num, item_num, base_factor)
        else:
            base_model = ClassRecommender(user_num, item_num, base_factor)
        base_model.fit(tr_df, test_df=te_df,
                       num_epochs=args.epoch,
                       cuda=args.cuda_idx,
                       decay=args.decay,
                       num_neg=args.num_neg,
                       batch_size=args.batch_size,
                       past_hist=past_hist,
                       lr=args.lr)
        logger.info(f'eval for {model_str} model on test')
        unbiased_eval(user_num, item_num, te_df, base_model, past_hist=past_hist)
        # Variant 2: popularity-adjusted negative sampling.
        logging.info(f'-------The {model_str} Pop Adjust model-------')
        pop_adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            pop_adjust_model = DeepRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        else:
            pop_adjust_model = ClassRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        pop_adjust_model.fit(tr_df, test_df=te_df,
                             num_epochs=args.epoch,
                             cuda=args.cuda_idx,
                             decay=args.decay,
                             num_neg=args.num_neg,
                             batch_size=args.batch_size,
                             past_hist=past_hist,
                             lr=args.lr)
        logger.info(f'eval for adjust {model_str} with popular model on test')
        unbiased_eval(user_num, item_num, te_df, pop_adjust_model, past_hist=past_hist)
        del pop_adjust_factor
        # Variant 3: mirror adjustment -- the plain model serves as the
        # exposure estimator for a fresh copy of the backbone.
        logging.info(f'-------The {model_str} Mirror Adjust model-------')
        adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            adjust_model = DeepRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1, expo_isdeep=True)
        else:
            adjust_model = ClassRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1)
        adjust_model.fit(tr_df, test_df=te_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         batch_size=args.batch_size,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info(f'eval for {model_str} mirror adjusted model')
        unbiased_eval(user_num, item_num, te_df, adjust_model, past_hist=past_hist)
        del adjust_factor
    # Run the three-variant experiment for every requested non-GAN backbone;
    # 'seq' backbones use the deep (sequential) recommender wrapper.
    for model_str in choices:
        if model_str != 'acgan':
            complete_experiment(model_str, user_num, item_num, args.dim, model_str == 'seq')
    # Adversarial-counterfactual training with configurable f/g backbones.
    if 'acgan' in choices:
        logger.info(f'-------The AC GAN model with {args.f_model} / {args.g_model}---------')
        if acgan_config[0]:
            f = AttentionModel(user_num=user_num, item_num=item_num, factor_num=args.dim, max_len=50, num_layer=2)
            f_recommender = DeepRecommender(max_u=user_num, max_v=item_num, seq_model=f)
            f_recommender.set_user_record(hist)
        else:
            f = get_model(args.f_model, user_num=user_num, item_num=item_num, factor_num=args.dim)
            f_recommender = ClassRecommender(user_num, item_num, f)
        if acgan_config[1]:
            g = AttentionModel(user_num=user_num, item_num=item_num, factor_num=args.dim, max_len=50, num_layer=2)
            g_recommender = DeepRecommender(max_u=user_num, max_v=item_num, seq_model=g)
            g_recommender.set_user_record(hist)
        else:
            g = get_model(args.g_model, user_num=user_num, item_num=item_num, factor_num=args.dim)
            g_recommender = ClassRecommender(user_num, item_num, g)
        beta = BetaModel(user_num=user_num, item_num=item_num)
        # Warm-start the exposure side (g) before adversarial training.
        g_recommender.fit(tr_df,
                          num_epochs=args.g_round_head,
                          cuda=args.cuda_idx,
                          num_neg=args.num_neg,
                          batch_size=args.batch_size,
                          past_hist=past_hist,
                          decay=args.decay,
                          lr=args.lr)
        ac_train_v3(f, acgan_config[0], g, acgan_config[1], beta, tr_df,
                    user_num=user_num,
                    item_num=item_num,
                    val_df=te_df,
                    rating_model=rating_model,
                    num_epochs=args.epoch,
                    decay=args.decay,
                    cuda_idx=args.cuda_idx,
                    num_neg=args.num_neg,
                    batch_size=args.batch_size,
                    past_hist=past_hist,
                    g_weight=0.5,
                    lr=args.lr)
        logger.info(f'--final eval for AC GAN {args.f_model} / {args.g_model}--')
        unbiased_eval(user_num, item_num, te_df, f_recommender, past_hist=past_hist)
        unbiased_eval(user_num, item_num, te_df, g_recommender, past_hist=past_hist)
if __name__ == '__main__':
    # CLI entry point: parse arguments, configure logging, run the experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=32)
    parser.add_argument('--epoch', type=int, default=50)
    parser.add_argument('--decay', type=float, default=1e-7)
    parser.add_argument('--cuda_idx', type=int, default=0)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_real')
    # FIX: was type=str with an int default, so a command-line --num_neg
    # reached the training code as a string where an int count is expected.
    parser.add_argument('--num_neg', type=int, default=4)
    parser.add_argument('--tune_mode', action='store_true')
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--models',
                        default=['ncf', 'mlp', 'gmf', 'acgan', 'seq'],
                        nargs='+',
                        help="input a list from ['ncf', 'mlp', 'gmf', 'acgan', 'seq']")
    parser.add_argument('--f_model', type=str, default='mlp', choices=['ncf', 'mlp', 'gmf', 'seq'])
    parser.add_argument('--g_model', type=str, default='mlp', choices=['ncf', 'mlp', 'gmf', 'seq'])
    parser.add_argument('--g_round_head', type=int, default=5)
    args = parser.parse_args()
    ### set up logger: everything to a timestamped file, warnings+ to console.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(f'log/{args.prefix}-{str(time.time())}.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(args)
main(args) | 10,239 | 43.716157 | 125 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/simulation.py | """Script to generate recommendation data from simulation"""
import argparse
from argparse import Namespace
import os
import pandas as pd #type: ignore
import torch #type: ignore
import numpy as np #type: ignore
from scipy import sparse as sp #type: ignore
from tqdm import tqdm #type: ignore
from acgan.data import RatingData
from acgan.module import FactorModel, NoiseFactor
from acgan.recommender import ClassRecommender, RatingEstimator, BPRRecommender
from sklearn.model_selection import train_test_split
def main(args: Namespace):
    """Generate simulated click data: fit relevance + exposure models on
    real ratings, add noise to the exposure model, then sample clicks
    either over the dense user x item grid or over per-user item samples.
    """
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    u_num, i_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    rel_factor = FactorModel(u_num, i_num, args.dim)
    expo_factor = FactorModel(u_num, i_num, args.dim)
    rating_features = list(zip(ratings.uidx, ratings.iidx, ratings.rating))
    rating_model = RatingEstimator(u_num, i_num, rel_factor)
    #expo_model = BPRRecommender(u_num, i_num, expo_factor)
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    #
    print('train rel model')
    rating_model.fit(rating_features, cuda=0, num_epochs=args.epoch)
    #
    print('train expo model')
    full_mat = sp.csr_matrix((ratings.rating, (ratings.uidx, ratings.iidx)), shape=(u_num, i_num))
    print(full_mat.shape)
    expo_model.fit(ratings, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(rel_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_rel.pt'))
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo.pt'))
    print('get noise added expo model')
    expo_factor = NoiseFactor(expo_factor, args.dim)
    expo_factor = expo_factor.cuda()
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt'))
    # re-assign the expo model
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    sigmoid = lambda x: np.exp(x) / (1 + np.exp(x))
    if not args.sample_sim:
        # Dense mode: score every (user, item) pair; guard against blow-up.
        if u_num * i_num > 10000 * 10000:
            raise ValueError('Size over limit, please use --sample_sim flag')
        u_all = np.arange(u_num).repeat(i_num)
        i_all = np.arange(i_num).repeat(u_num).reshape(i_num, u_num).reshape(-1, order='F')
        est_rel = rating_model.score(u_all, i_all)
        est_click_prob = sigmoid(est_rel - args.epsilon)
        est_logits = expo_model.score(u_all, i_all)
        # Compound exponent p sharpens/flattens the exposure distribution.
        est_expo_prob = sigmoid(est_logits) ** args.p
        simu_size = len(est_click_prob)
        # Observed interaction requires both a click and an exposure draw.
        click_event = np.random.random(simu_size) < est_click_prob
        expo_event = np.random.random(simu_size) < est_expo_prob
        valid = click_event * expo_event
        # Persist the full (unfiltered) simulation table for analysis.
        out = {}
        out['uidx'] = u_all
        out['iidx'] = i_all
        out['click_prob'] = est_click_prob
        out['expo_prob'] = est_expo_prob
        out['click'] = click_event * expo_event
        out['expo'] = expo_event
        out_df = pd.DataFrame(out)
        out_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_full.feather'))
        print(f'total size: {len(valid)}, valid size: {valid.sum()}')
        out = {}
        out['uidx'] = u_all[valid]
        out['iidx'] = i_all[valid]
        out['click_prob'] = est_click_prob[valid]
        out['expo_prob'] = est_expo_prob[valid]
        out_df = pd.DataFrame(out)
    else:
        # Sampled mode: for each user, score a random subset of items.
        print('Too many items to compute, only consider a subset')
        template = np.ones(args.item_sample_size).astype(np.int64)
        out = {'uidx':[], 'iidx':[], 'click_prob':[], 'expo_prob':[]}
        for i in tqdm(range(u_num)):
            candidate_item = np.random.randint(low=0, high=i_num, size=args.item_sample_size)
            candidate_user = template * i
            est_rel = rating_model.score(candidate_user, candidate_item)
            est_click_prob = sigmoid(est_rel - args.epsilon)
            est_logits = expo_model.score(candidate_user, candidate_item)
            est_expo_prob = sigmoid(est_logits) ** args.p
            click_event = np.random.random(args.item_sample_size) < est_click_prob
            expo_event = np.random.random(args.item_sample_size) < est_expo_prob
            valid = click_event * expo_event
            if valid.sum() >= 1:
                out['uidx'].extend(candidate_user[valid].tolist())
                out['iidx'].extend(candidate_item[valid].tolist())
                out['click_prob'].extend(est_click_prob[valid].tolist())
                out['expo_prob'].extend(est_expo_prob[valid].tolist())
        if len(out['uidx']) == 0:
            raise ValueError('Simulation failed, does not gather positive signals')
        out_df = pd.DataFrame(out)
    # 80/10/10 train/val/test split of the observed interactions.
    train_df, tmp_df = train_test_split(out_df, test_size=0.2)
    val_df, test_df = train_test_split(tmp_df, test_size=0.5)
    train_df = train_df.reset_index(drop=True)
    print(f'train shape: {train_df.shape}')
    val_df = val_df.reset_index(drop=True)
    print(f'val shape: {val_df.shape}')
    test_df = test_df.reset_index(drop=True)
    print(f'test shape: {test_df.shape}')
    print(train_df.head())
    train_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
    val_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
    test_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
if __name__ == '__main__':
    # CLI entry point for the simulation generator.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=2048)
    parser.add_argument('--dim', type=int, default=32)
    parser.add_argument('--epsilon', type=float, default=4)
    # FIX: epoch counts are integral; type=float produced a float num_epochs
    # for the downstream fit() calls.
    parser.add_argument('--epoch', type=int, default=5)
    parser.add_argument('--decay', type=float, default=1e-8)
    parser.add_argument('--p', type=float, default=3)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--sample_sim', action='store_true')
    parser.add_argument('--item_sample_size', type=int, default=2000)
    args = parser.parse_args()
main(args) | 6,208 | 44.654412 | 101 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/module.py | """Modules are to express the mathematical relationships between parameters.
Design note: The module shoudn't care about things like data transformations. It should be
as self-contained as possible. Dirty jobs should be done by the Model class which serves
as a bridge between reality(data) and the theory(module).
"""
from typing import List, Tuple, Any, Optional
from scipy import sparse as sp # type: ignore
import numpy as np # type: ignore
import torch # type: ignore
from torch import nn # type: ignore
class PopularModel(nn.Module):
    """Non-trainable item-popularity scorer.

    Counts are clamped to >= 1, normalised by the most popular item, and
    damped with exponent ``shrinkage``; the result is stored in a frozen
    1-dim embedding table so it can be looked up like any other factor.
    """

    def __init__(self, pop_cnt: np.ndarray, shrinkage: float = 0.5):
        super(PopularModel, self).__init__()
        counts = pop_cnt.copy()
        counts[counts < 1] = 1  # avoid zero counts breaking the ratio
        scores = ((counts / counts.max()) ** shrinkage).reshape(-1, 1)
        self.rep_pop_table = nn.Embedding(scores.shape[0], 1)
        self.rep_pop_table.weight.data.copy_(torch.from_numpy(scores))
        self.rep_pop_table.weight.requires_grad = False

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        # ``user`` is ignored: popularity depends on the item only.
        return self.rep_pop_table(item).squeeze(-1)

    def get_device(self):
        return self.rep_pop_table.weight.device
class FactorModel(nn.Module):
def __init__(self, user_num: int, item_num: int, factor_num: int) -> None:
super(FactorModel, self).__init__()
self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
self.bias_user = nn.Embedding(user_num, 1, sparse=True)
self.embed_item = nn.Embedding(item_num, factor_num, sparse=True)
self.bias_item = nn.Embedding(item_num, 1, sparse=True)
self.final_layer = nn.Linear(factor_num, 1, bias=True)
#self.bias_global = nn.Parameter(torch.zeros(1))
nn.init.kaiming_normal_(self.embed_user.weight)
nn.init.kaiming_normal_(self.embed_item.weight)
nn.init.zeros_(self.bias_item.weight)
nn.init.zeros_(self.bias_user.weight)
def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor: # type: ignore
vec_user = self.embed_user(user)
vec_item = self.embed_item(item)
prediction = (vec_user * vec_item)
return prediction
def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor: # type: ignore
affinity_vec = self.affinity_vector(user, item)
bias_user = self.bias_user(user).squeeze(-1)
bias_item = self.bias_item(item).squeeze(-1)
prediction = self.final_layer(affinity_vec).squeeze(-1)
prediction += bias_item + bias_user
return prediction
def get_sparse_weight(self) -> List[torch.Tensor]:
out = [self.embed_user.weight, self.bias_user.weight,
self.embed_item.weight, self.bias_item.weight]
return out
def get_dense_weight(self) -> List[torch.Tensor]:
out = []
out.extend(self.final_layer.parameters())
return out
def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
vec_user = self.embed_user(user)
vec_item = self.embed_item(item)
l2_loss = (vec_user ** 2).sum()
l2_loss += (vec_item ** 2).sum()
l2_loss += (self.final_layer.weight ** 2).sum()
return l2_loss
def get_device(self):
return self.embed_item.weight.device
class BetaModel(nn.Module):
    """Propensity-adjustment head ("beta v2"): an affine transform of the
    generator score ``g_s`` with a label-dependent extra slope. Per-user
    and per-item constants are kept as parameters but are not used in the
    current forward pass."""

    def __init__(self, user_num: int, item_num: int) -> None:
        super(BetaModel, self).__init__()
        self.user_const = nn.Embedding(user_num, 1, sparse=True)
        self.item_const = nn.Embedding(item_num, 1, sparse=True)
        self.alpha = torch.nn.Parameter(torch.zeros(1))  # type: ignore
        self.beta = torch.nn.Parameter(torch.ones(1))  # type: ignore
        self.label_coef = torch.nn.Parameter(torch.zeros(1))  # type: ignore
        nn.init.zeros_(self.user_const.weight)
        nn.init.zeros_(self.item_const.weight)

    def forward(self, user: torch.Tensor, item: torch.Tensor, g_s: torch.Tensor, label: torch.Tensor) -> torch.Tensor:  # type: ignore
        # beta v2: the per-user/item constants are intentionally excluded.
        return self.alpha + self.beta * g_s + self.label_coef * label * g_s

    def get_sparse_weight(self) -> List[torch.Tensor]:
        """Embedding weights for a sparse optimiser."""
        return [self.user_const.weight, self.item_const.weight]

    def get_dense_weight(self) -> List[torch.Tensor]:
        """Scalar parameters for a dense optimiser."""
        return [self.alpha, self.beta, self.label_coef]

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """L2 penalty over the (currently unused) per-user/item constants."""
        penalty = (self.user_const(user).squeeze(-1) ** 2).sum()
        penalty = penalty + (self.item_const(item).squeeze(-1) ** 2).sum()
        return penalty
class MLPRecModel(nn.Module):
    """MLP recommender: user/item embeddings concatenated and fed through a ReLU tower.

    Args:
        user_num: number of users (rows of the user embedding table).
        item_num: number of items (rows of the item embedding table).
        factor_num: embedding dimension for users and items.
        layers_dim: hidden widths of the MLP tower; defaults to [32, 16].
    """
    def __init__(
            self,
            user_num: int,
            item_num: int,
            factor_num: int,
            layers_dim: Optional[List[int]] = None):
        super(MLPRecModel, self).__init__()
        # FIX: replaced the mutable default argument ([32, 16]) with a None
        # sentinel; the effective default is unchanged.
        if layers_dim is None:
            layers_dim = [32, 16]
        assert(isinstance(layers_dim, list))
        self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
        self.embed_item = nn.Embedding(item_num, factor_num, sparse=True)
        nn.init.kaiming_normal_(self.embed_user.weight)
        nn.init.kaiming_normal_(self.embed_item.weight)
        self.dense_layers = nn.ModuleList()
        # The first layer consumes the concatenated user/item vectors.
        input_dims = [2 * factor_num] + layers_dim
        for i in range(len(layers_dim)):
            self.dense_layers.append(
                nn.Linear(input_dims[i], layers_dim[i], bias=True))
        self.act_func = nn.ReLU()
        self.out_put_layer = nn.Linear(layers_dim[-1], 1, bias=True)
    def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Hidden representation of a (user, item) pair after the MLP tower."""
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        x = torch.cat([vec_user, vec_item], dim=-1)
        for linear_layer in self.dense_layers:
            x = self.act_func(linear_layer(x))
        return x
    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Scalar score for each aligned (user, item) pair."""
        x = self.affinity_vector(user, item)
        prediction = self.out_put_layer(x).squeeze(-1)
        return prediction
    def get_device(self):
        """Device holding the model parameters."""
        return self.embed_item.weight.device
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy scores for aligned user/item id lists (no gradients tracked)."""
        with torch.no_grad():
            device = self.embed_user.weight.device
            ubt = torch.LongTensor(u_b).to(device)
            vbt = torch.LongTensor(v_b).to(device)
            score = self.forward(ubt, vbt).cpu().numpy()
            return score
    def get_sparse_weight(self) -> List[torch.Tensor]:
        """Embedding tables, trained with a sparse optimizer."""
        return [self.embed_user.weight, self.embed_item.weight]
    def get_dense_weight(self) -> List[torch.Tensor]:
        """MLP tower and output-layer parameters, trained with a dense optimizer."""
        out = []
        for layer in self.dense_layers:
            out.extend(layer.parameters())
        out.extend(self.out_put_layer.parameters())
        return out
    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """L2 penalty over the looked-up embeddings only; dense weights are
        deliberately excluded from regularisation."""
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        l2_loss = (vec_user ** 2).sum()
        l2_loss += (vec_item ** 2).sum()
        return l2_loss
class NCFModel(nn.Module):
    """Neural Collaborative Filtering: concatenates an MLP tower and a GMF tower."""
    def __init__(self, user_num: int, item_num: int, factor_num: int, layers_dim: Optional[List[int]] = None):
        super(NCFModel, self).__init__()
        if layers_dim is None:
            layers_dim = [factor_num // 2, factor_num // 4]
        # Split the final joint representation between the two towers.
        mlp_out_dim = layers_dim[-1]
        gmf_out_dim = factor_num - mlp_out_dim
        gmf_in_dim = gmf_out_dim
        self.mlp = MLPRecModel(user_num, item_num, factor_num // 2, layers_dim=layers_dim)
        self.gmf = FactorModel(user_num, item_num, gmf_in_dim)
        self.out_put_layer = nn.Linear(in_features=factor_num, out_features=1, bias=True)
    def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Concatenation of the MLP and GMF tower representations."""
        towers = [self.mlp.affinity_vector(user, item),
                  self.gmf.affinity_vector(user, item)]
        return torch.cat(towers, dim=-1)
    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Scalar score for each aligned (user, item) pair."""
        return self.out_put_layer(self.affinity_vector(user, item)).squeeze(-1)
    def get_sparse_weight(self):
        return self.mlp.get_sparse_weight() + self.gmf.get_sparse_weight()
    def get_dense_weight(self):
        return self.mlp.get_dense_weight() + self.gmf.get_dense_weight()
    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Sum of the tower L2 terms plus the output-layer weight norm."""
        reg = self.mlp.get_l2(user, item) + self.gmf.get_l2(user, item)
        return reg + (self.out_put_layer.weight ** 2).sum()
    def get_device(self):
        return self.gmf.get_device()
class StructureNoise(nn.Module):
    """Small MLP mapping a (user_vec, item_vec) pair to a non-negative scalar."""
    def __init__(self, factor_num: int) -> None:
        super(StructureNoise, self).__init__()
        self.l1 = nn.Linear(2 * factor_num, factor_num)
        self.l2 = nn.Linear(factor_num, factor_num)
        self.l3 = nn.Linear(factor_num, 1)
        self.act = nn.ReLU()
    def forward(
            self,
            user_vec: torch.Tensor,
            item_vec: torch.Tensor) -> torch.Tensor:
        hidden = self.act(self.l1(torch.cat([user_vec, item_vec], dim=-1)))
        hidden = self.act(self.l2(hidden))
        # Final ReLU keeps the emitted noise term non-negative.
        return self.act(self.l3(hidden)).squeeze(-1)
class NoiseFactor(nn.Module):
    """Wraps a factor model and adds a learned non-negative noise term on top.

    NOTE(review): 'facotr_model' is a typo of 'factor_model' but is kept,
    since renaming the attribute would break external callers.
    """
    def __init__(self, facotr_model: torch.nn.Module, factor_num: int) -> None:
        super(NoiseFactor, self).__init__()
        self.noise_model = StructureNoise(factor_num)
        self.facotr_model = facotr_model
        # Re-exported so callers can reach the item table directly.
        self.embed_item = self.facotr_model.embed_item
    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        prediction = self.facotr_model(user, item)
        # The embeddings are looked up under no_grad, so the additive noise
        # term back-propagates into the noise MLP only, not into the tables.
        with torch.no_grad():
            vec_user = self.facotr_model.embed_user(user)
            vec_item = self.facotr_model.embed_item(item)
        prediction += self.noise_model(vec_user, vec_item)
        return prediction
    def get_sparse_weight(self) -> List[torch.Tensor]:
        # Intentionally empty: the wrapped model's weights are frozen here.
        return []
    def get_dense_weight(self) -> List[torch.Tensor]:
        # Intentionally empty — noise_model is presumably optimised
        # elsewhere; TODO confirm against caller.
        return []
    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        # Delegate regularisation entirely to the wrapped model.
        return self.facotr_model.get_l2(user, item)
    def get_device(self):
        return self.facotr_model.get_device()
class AttentionModel(nn.Module):
    """Self-attention sequence model: scores target items against an
    attention-pooled representation of the user's item history."""
    def __init__(
            self,
            user_num: int,
            item_num: int,
            factor_num: int,
            max_len: int = 20,
            num_heads: int = 2,
            num_layer: int = 2) -> None:
        super(AttentionModel, self).__init__()
        self.user_num = user_num
        self.item_num = item_num
        self.factor_num = factor_num
        # Extra embedding row `item_num` serves as the padding token.
        self.padding_idx = self.item_num
        self.max_len = max_len
        #self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
        self.embed_item = nn.Embedding(item_num + 1, factor_num, sparse=False, padding_idx=self.padding_idx)
        #self.target_item_embed = nn.Embedding(item_num + 1, factor_num, sparse=False, padding_idx=self.padding_idx)
        # Learned positional encoding over the fixed history window.
        self.position_encode = nn.Embedding(max_len, factor_num, sparse=False)
        self.attention_list = nn.ModuleList()
        for _ in range(num_layer):
            self.attention_list.append(nn.MultiheadAttention(embed_dim=factor_num, num_heads=num_heads))
        self.output_affine = nn.Linear(factor_num, 1, bias=True)
    def get_device(self):
        """Device holding the model parameters."""
        return self.embed_item.weight.device
    def seq_vector(self, user_hist: torch.Tensor) -> torch.Tensor:
        """Attention-pooled history representation.

        args:
            user: [B]
            item: [B]
            user_hist: [B, max_len]
        """
        hist_item_vec = self.embed_item(user_hist)  # [B, max_len, factor_num]
        pos = torch.arange(self.max_len, device=self.get_device()).reshape(1, -1).repeat(hist_item_vec.shape[0], 1)
        # add positional encoding; padded positions are masked out below
        mask_item = (user_hist == self.padding_idx)
        attn_item_vec = hist_item_vec + self.position_encode(pos)
        # nn.MultiheadAttention expects sequence-first layout.
        attn_item_vec = attn_item_vec.transpose(1, 0)  #[max_len, B, factor_num]
        for atten_layer in self.attention_list:
            attn_item_vec, _ = atten_layer(
                query=attn_item_vec,
                key=attn_item_vec,
                value=attn_item_vec,
                key_padding_mask=mask_item)
        # attn_item_vec - [max_len, B, factor_num]; mean-pool over positions
        attn_item_vec = attn_item_vec.mean(dim=0)  #[B, factor_num]
        return attn_item_vec
    def forward(self, items: torch.Tensor, user_hists: torch.Tensor) -> torch.Tensor:
        """Score each candidate item against the pooled history vector."""
        # items - [B, ord]
        assert(len(items.shape) == 2)
        assert(items.shape[0] == user_hists.shape[0])
        affinity_vec = self.seq_vector(user_hists)  # [B, dim]
        affinity_vec = affinity_vec.unsqueeze(1).repeat(1, items.shape[1], 1)  # [B, ord, dim]
        target_item_vec = self.embed_item(items)  # - [B, ord, dim]
        #target_item_vec = self.target_item_embed(items) # - [B, ord, dim]
        # Elementwise interaction followed by an affine projection to a scalar.
        score = self.output_affine(affinity_vec * target_item_vec)  # [B, ord, 1]
        return score.squeeze(-1)  # [B, ord]
    def get_dense_weight(self):
        """All parameters are dense for this model."""
        return list(self.parameters())
    def get_sparse_weight(self):
        return []
    def get_l2(self, users: torch.Tensor, items: torch.Tensor) -> torch.Tensor:
        # Multiplied by zero: L2 regularisation is deliberately disabled here,
        # while keeping a tensor on the right device/graph for the caller.
        target_item_vec = self.embed_item(items)
        return (target_item_vec ** 2).sum() * 0
| 14,132 | 39.495702 | 134 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/data.py | import os
import argparse
import logging
from typing import Dict, List, Tuple, Optional, Set
import numpy as np # type: ignore
import pandas as pd # type: ignore
from scipy import sparse as sp # type: ignore
import torch # type: ignore
from torch.utils import data # type: ignore
from numpy.random import RandomState # type: ignore
def ml_1m(
        data_path: str,
        train_path: str,
        val_path: str,
        test_path: str) -> None:
    """Preprocess MovieLens-1M into sparse train/val/test matrices.

    Per-user leave-two-out split by timestamp: all but the last two
    interactions go to train, the second-to-last to validation and the
    last to test. The three splits are saved as scipy .npz CSR matrices.
    """
    ratings = pd.read_csv(
        os.path.join(
            data_path,
            'ratings.dat'),
        sep='::',
        names=[
            'uidx',
            'iidx',
            'rating',
            'ts'],
        dtype={
            'uidx': int,
            'iidx': int,
            'rating': float,
            'ts': float})
    print(ratings.shape)
    # MovieLens ids are 1-based; shift to 0-based for matrix indexing.
    ratings.uidx = ratings.uidx - 1
    ratings.iidx = ratings.iidx - 1
    print(ratings.head())
    ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
    user_hist: Dict[int, List[Tuple[int, float]]] = {}
    for row in ratings.itertuples():
        if row.uidx not in user_hist:
            user_hist[row.uidx] = []
        user_hist[row.uidx].append((row.iidx, row.ts))
    # sort by ts in descending order
    # row represents the user, columns represents the item
    train_record: List[Tuple[int, int]] = []
    val_record: List[Tuple[int, int]] = []
    test_record: List[Tuple[int, int]] = []
    for uidx, hist in user_hist.items():
        # Chronological order (ascending ts); last two become val/test.
        ord_hist = [x[0] for x in sorted(hist, key=lambda x: x[1])]
        # Assumes every user has at least 20 ratings (holds for ML-1M).
        assert(len(ord_hist) >= 20)
        for v in ord_hist[:-2]:
            train_record.append((uidx, v))
        val_record.append((uidx, ord_hist[-2]))
        test_record.append((uidx, ord_hist[-1]))
    # Implicit feedback: every recorded interaction gets weight 1.
    train_dat = np.ones(len(train_record))
    val_dat = np.ones(len(val_record))
    test_dat = np.ones(len(test_record))
    train_npy = np.array(train_record)
    val_npy = np.array(val_record)
    test_npy = np.array(test_record)
    mat_shape = (ratings.uidx.max() + 1, ratings.iidx.max() + 1)
    train_csr = sp.csr_matrix((train_dat, (train_npy[:, 0], train_npy[:, 1])),
                              shape=mat_shape)
    val_csr = sp.csr_matrix((val_dat, (val_npy[:, 0], val_npy[:, 1])),
                            shape=mat_shape)
    test_csr = sp.csr_matrix((test_dat, (test_npy[:, 0], test_npy[:, 1])),
                             shape=mat_shape)
    sp.save_npz(train_path, train_csr)
    sp.save_npz(val_path, val_csr)
    sp.save_npz(test_path, test_csr)
def time_based_split(
        ratings: pd.DataFrame,
        data_path: str,
        min_len: int = 20) -> None:
    """Per-user leave-two-out split by timestamp, written as feather files.

    For every user the interactions are sorted by time; all but the last two
    go to train, the second-to-last to validation, the last to test.

    Args:
        ratings: frame with exactly the columns ['uidx', 'iidx', 'rating', 'ts'].
        data_path: output directory for train/val/test feather files.
        min_len: minimum interactions required per user (asserted).

    Raises:
        ValueError: if the input frame does not have the expected columns.
    """
    names = ['uidx', 'iidx', 'rating', 'ts']
    # FIX: the old element-wise comparison raised a pandas length-mismatch
    # error instead of the intended ValueError when column counts differed.
    if list(ratings.columns) != names:
        raise ValueError(
            f"Only support data frame with columns ['uidx', 'iidx', 'rating', 'ts'], the input is {ratings.columns}")
    user_hist: Dict[int, List[Tuple[int, float, float]]] = {}
    for row in ratings.itertuples():
        if row.uidx not in user_hist:
            user_hist[row.uidx] = []
        user_hist[row.uidx].append((row.iidx, row.rating, row.ts))
    # Column-oriented accumulators for the three output frames.
    train_record = {x: [] for x in names}
    val_record = {x: [] for x in names}
    test_record = {x: [] for x in names}

    def put2record(record, u, obs):
        # obs is an (iidx, rating, ts) triple.
        record['uidx'].append(u)
        record['iidx'].append(obs[0])
        record['rating'].append(obs[1])
        record['ts'].append(obs[2])

    for uidx, hist in user_hist.items():
        # Chronological order: sort each user's history by timestamp.
        ord_hist = [x for x in sorted(hist, key=lambda x: x[-1])]
        # FIX: the minimum-length check previously hard-coded 20 and
        # silently ignored the min_len parameter.
        assert(len(ord_hist) >= min_len)
        for v in ord_hist[:-2]:
            put2record(train_record, uidx, v)
        put2record(val_record, uidx, ord_hist[-2])
        put2record(test_record, uidx, ord_hist[-1])
    train_path = os.path.join(data_path, 'train.feather')
    pd.DataFrame(train_record).to_feather(train_path)
    val_path = os.path.join(data_path, 'val.feather')
    pd.DataFrame(val_record).to_feather(val_path)
    test_path = os.path.join(data_path, 'test.feather')
    pd.DataFrame(test_record).to_feather(test_path)
def ml_1m_v2(data_path: str) -> None:
    """Load raw MovieLens-1M ratings, shift ids to 0-based and run the time split."""
    columns = ['uidx', 'iidx', 'rating', 'ts']
    column_types = {'uidx': int, 'iidx': int, 'rating': float, 'ts': float}
    raw_path = os.path.join(data_path, 'ratings.dat')
    ratings = pd.read_csv(raw_path, sep='::', names=columns, dtype=column_types)
    print(ratings.shape)
    # MovieLens ids are 1-based; embedding tables expect 0-based indices.
    ratings.uidx = ratings.uidx - 1
    ratings.iidx = ratings.iidx - 1
    print(ratings.head())
    ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
    time_based_split(ratings, data_path, 20)
class NegSeqData(data.Dataset):
    """Dataset of (user, pos_item) pairs; each pair gets a list of sampled negatives."""
    def __init__(self,
                 features: List[Tuple[int,
                                      int]],
                 num_item: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 seed: int = 123,
                 past_hist: Optional[Dict[int,
                                          Set[int]]] = None) -> None:
        super(NegSeqData, self).__init__()
        """ Note that the labels are only useful when training, we thus
        add them in the ng_sample() function.
        """
        self.features = features
        self.num_item = num_item
        # Observed (user, item) pairs, used to reject false negatives.
        self.train_set = set(features)
        self.num_neg = num_neg
        self.is_training = is_training
        self.past_hist = past_hist
        # Seeded RNG for reproducible negative sampling.
        self.prng = RandomState(seed)
    def ng_sample(self) -> None:
        # Draw num_neg negatives per positive. Must be called before
        # iterating the dataset in training mode.
        self.features_fill = []
        for x in self.features:
            u, i = x[0], x[1]
            j_list = []
            for _ in range(self.num_neg):
                is_dup = True
                while is_dup:
                    j = self.prng.randint(self.num_item)
                    is_dup = (u, j) in self.train_set
                    if self.past_hist is not None:
                        # Also reject items the user has already seen.
                        is_dup = is_dup or j in self.past_hist.get(u, [])
                j_list.append(j)
            self.features_fill.append([u, i, j_list])
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, idx):
        features = self.features_fill if \
            self.is_training else self.features
        user = features[idx][0]
        item_i = features[idx][1]
        # In eval mode there are no negatives; the positive is returned again.
        item_j_list = np.array(features[idx][2]) if \
            self.is_training else features[idx][1]
        return user, item_i, item_j_list
class NegSampleData(data.Dataset):
    """Dataset of (user, pos_item) pairs expanded to one record per sampled negative."""
    def __init__(self,
                 features: List[Tuple[int,
                                      int]],
                 num_item: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 seed: int = 123) -> None:
        super(NegSampleData, self).__init__()
        """ Note that the labels are only useful when training, we thus
        add them in the ng_sample() function.
        """
        self.features = features
        self.num_item = num_item
        # Observed (user, item) pairs, used to reject false negatives.
        self.train_set = set(features)
        self.num_neg = num_neg
        self.is_training = is_training
        # Seeded RNG for reproducible negative sampling.
        self.prng = RandomState(seed)
    def ng_sample(self) -> None:
        assert self.is_training, 'no need to sample when testing'
        # One (u, i, j) record per negative, so the filled list is
        # num_neg times longer than the positives list.
        self.features_fill = []
        for x in self.features:
            u, i = x[0], x[1]
            for _ in range(self.num_neg):
                j = self.prng.randint(self.num_item)
                while (u, j) in self.train_set:
                    j = self.prng.randint(self.num_item)
                self.features_fill.append([u, i, j])
    def __len__(self) -> int:
        # Training length matches the expanded features_fill list.
        return self.num_neg * len(self.features) if \
            self.is_training else len(self.features)
    def __getitem__(self, idx):
        features = self.features_fill if \
            self.is_training else self.features
        user = features[idx][0]
        item_i = features[idx][1]
        # In eval mode there is no negative; the positive is returned again.
        item_j = features[idx][2] if \
            self.is_training else features[idx][1]
        return user, item_i, item_j
class RatingData(data.Dataset):
    """Thin Dataset wrapper over a list of (user, item, rating) triples."""
    def __init__(self, features: List[Tuple[int, int, float]]) -> None:
        super(RatingData, self).__init__()
        self.features = features
    def __len__(self):
        """Number of rating triples."""
        return len(self.features)
    def __getitem__(self, idx):
        """Return the idx-th (user, item, rating) triple unchanged."""
        return self.features[idx]
class NegSequenceData(data.Dataset):
    """Sequence dataset with on-the-fly negative sampling.

    Each record is (uidx, positive_item, history_slice). __getitem__
    left-pads the history to max_len with padding_idx and draws num_neg
    negatives that avoid the positive item and, if provided, the user's
    past history.

    Args:
        hist: per-user chronological item lists.
        max_len: history window length.
        padding_idx: id used to pad short histories.
        item_num: number of items (negatives drawn uniformly from it).
        num_neg: negatives per record.
        is_training: accepted for signature compatibility; unused here.
        past_hist: optional per-user sets of already-seen items to exclude.
        seed: RNG seed for reproducible sampling.
        window: True -> one record per position; False -> one record per user.
        allow_empty: keep records whose history slice would be empty.
    """
    def __init__(self, hist: Dict[int, List[int]],
                 max_len: int,
                 padding_idx: int,
                 item_num: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 past_hist: Optional[Dict[int, Set[int]]] = None,
                 seed: int = 123,
                 window: bool = True,
                 allow_empty: bool = False) -> None:
        super(NegSequenceData, self).__init__()
        self.max_len = max_len
        self.padding_idx = padding_idx
        self.num_item = item_num
        self.num_neg = num_neg
        self.past_hist = past_hist
        self.prng = RandomState(seed)
        self.logger = logging.getLogger(__name__)
        self.logger.debug('Build windowed data')
        self.records = []
        for uidx, item_list in hist.items():
            if window:
                # One record per position with up to max_len preceding items.
                for i in range(len(item_list)):
                    item_slice = item_list[max(0, i - max_len):i]
                    if not allow_empty and len(item_slice) == 0:
                        continue
                    self.records.append([uidx, item_list[i], item_slice])
            else:
                # Single record per user: the last item is the target.
                if not allow_empty and len(item_list) == 1:
                    continue
                self.records.append([uidx, item_list[-1], item_list[-(max_len + 1):-1]])
    def __len__(self) -> int:
        return len(self.records)
    def __getitem__(self, idx):
        # Left-pad the history slice with padding_idx.
        temp_hist = np.full(self.max_len, self.padding_idx, dtype=int)
        uidx, pos_item, item_hist = self.records[idx]
        assert(len(temp_hist) >= len(item_hist))
        if len(item_hist) > 0:
            temp_hist[-len(item_hist):] = item_hist
        negitem_list = np.zeros(self.num_neg, dtype=int)
        # FIX: the sampling loop previously reused `idx` as its loop
        # variable, shadowing the dataset index parameter.
        for neg_pos in range(self.num_neg):
            is_dup = True
            while is_dup:
                negitem = self.prng.randint(self.num_item)
                is_dup = negitem == pos_item
                if self.past_hist is not None:
                    is_dup = is_dup or negitem in self.past_hist.get(uidx, [])
            negitem_list[neg_pos] = negitem
        return uidx, pos_item, negitem_list, temp_hist
if __name__ == '__main__':
    # ml_1m('/mnt/c0r00zy/a()c_gan/data/ml-1m',
    #       '/mnt/c0r00zy/ac_gan/data/ml-1m/train.npz',
    #       '/mnt/c0r00zy/ac_gan/data/ml-1m/val.npz',
    #       '/mnt/c0r00zy/ac_gan/data/ml-1m/test.npz')
    # CLI entry point: run the v2 MovieLens-1M preprocessing on --data_path.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, required=True)
    args = parser.parse_args()
    ml_1m_v2(args.data_path)
| 11,006 | 34.621359 | 117 | py |
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System | Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/recommender.py | from typing import List, Optional, Tuple, Dict, Set
import time
import logging
from tqdm import tqdm # type: ignore
from scipy import sparse as sp # type: ignore
import numpy as np # type: ignore
from sklearn.utils.extmath import randomized_svd # type: ignore
import torch # type: ignore
from torch import nn # type: ignore
from torch.utils import data # type: ignore
import pandas as pd # type: ignore
from numpy.random import RandomState # type: ignore
from acgan.module import PopularModel
from acgan.data import NegSampleData, RatingData, NegSeqData, NegSequenceData
class MultipleOptimizer:
    """Bundles several torch optimizers behind a single optimizer interface."""
    def __init__(self, *op):
        self.optimizers = op
    def zero_grad(self):
        """Clear gradients on every wrapped optimizer, in order."""
        for wrapped in self.optimizers:
            wrapped.zero_grad()
    def step(self):
        """Apply an update step on every wrapped optimizer, in order."""
        for wrapped in self.optimizers:
            wrapped.step()
def build_optimizer(lr, *models):
    """Build a combined optimizer over the given models.

    Sparse weights (embedding tables) go to SparseAdam, dense weights to
    Adam; the two are wrapped in a single MultipleOptimizer.

    Raises:
        ValueError: if the models expose neither sparse nor dense weights.
    """
    sparse_params = []
    dense_params = []
    for model in models:
        sparse_params.extend(model.get_sparse_weight())
        dense_params.extend(model.get_dense_weight())
    optimizers = []
    if sparse_params:
        optimizers.append(torch.optim.SparseAdam(params=sparse_params, lr=lr))
    if dense_params:
        optimizers.append(torch.optim.Adam(params=dense_params, lr=lr))
    if not optimizers:
        raise ValueError('Need at least one dense or sparse weights')
    return MultipleOptimizer(*optimizers)
class Recommender:
    """Abstract recommender interface; subclasses implement score() and fit()."""
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Return one relevance score per aligned (user, item) pair."""
        # FIX: removed the unreachable `return np.zeros(0)` that followed
        # this raise.
        raise NotImplementedError()
    def recommend(self, u_s: int, cand_b: List[int], top_k: int) -> List[int]:
        """Return the top_k candidate items for user u_s, best first."""
        u_b = [u_s] * len(cand_b)
        scores = self.score(u_b, cand_b)
        # argsort ascending, reversed -> indices of the highest scores first.
        top_k_ind = scores.argsort()[::-1][:top_k]
        return [cand_b[ind] for ind in top_k_ind]
    def fit(self, df: pd.DataFrame) -> None:
        """Train the recommender on an interaction frame."""
        raise NotImplementedError()
class PopRecommender(Recommender):
    """Recommender backed by a pretrained popularity module."""
    def __init__(self, pop_module: nn.Module) -> None:
        self.pop_module = pop_module
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy scores from the popularity module (no gradients tracked)."""
        with torch.no_grad():
            device = self.pop_module.get_device()
            self.pop_module.eval()
            users = torch.LongTensor(u_b).to(device)  # type: ignore
            items = torch.LongTensor(v_b).to(device)  # type: ignore
            return self.pop_module(users, items).cpu().numpy()
class RandRecommender(Recommender):
    """Baseline that scores every pair with uniform random noise in [0, 1)."""
    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v
    def fit(self, df: pd.DataFrame) -> None:
        # Nothing to learn for a random baseline.
        pass
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        return np.random.rand(len(u_b))
class UserBasedKnn(Recommender):
    """User-based neighborhood model scored via user-user co-rating weights."""
    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.user_item_score = None
    def fit(self, df: pd.DataFrame) -> None:
        """Precompute the dense user-item score matrix from the rating frame."""
        interaction = sp.csr_matrix(
            (df.rating, (df.uidx, df.iidx)), shape=(self.max_u, self.max_v))
        # User-user similarity with self-loops, propagated back to items.
        similarity = interaction.dot(interaction.T) + sp.eye(self.max_u)
        self.user_item_score = similarity.dot(interaction)
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        return np.asarray(self.user_item_score[u_b, v_b]).reshape(-1)
class PopRecommenderV2(Recommender):
    """Popularity recommender fitted from raw item interaction counts."""
    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.pop_module = None
    def fit(self, df: pd.DataFrame) -> None:
        """Count interactions per item and build a PopularModel from them."""
        counts = df.groupby('iidx').count().uidx.to_dict()
        item_cnt = np.array([counts.get(iidx, 0) for iidx in range(self.max_v)])
        self.pop_module = PopularModel(item_cnt)
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy scores from the fitted popularity module (no gradients)."""
        with torch.no_grad():
            device = self.pop_module.get_device()
            self.pop_module.eval()
            users = torch.LongTensor(u_b).to(device)  # type: ignore
            items = torch.LongTensor(v_b).to(device)  # type: ignore
            return self.pop_module(users, items).cpu().numpy()
class SVDRecommender(Recommender):
    """Pure-SVD recommender fitted directly from a sparse rating matrix."""
    def __init__(self, max_u: int, max_v: int, num_factors: int) -> None:
        self.USER_factors = np.zeros((max_u, num_factors))
        self.ITEM_factors = np.zeros((max_v, num_factors))
        self.num_factors = num_factors
    def fit(self, train_mat: sp.csr_matrix) -> None:
        """Truncated randomized SVD; singular values folded into the item side."""
        U, Sigma, VT = randomized_svd(train_mat,
                                      n_components=self.num_factors,
                                      random_state=None)
        self.USER_factors = U
        self.ITEM_factors = (sp.diags(Sigma) * VT).T
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class SVDRecommenderV2(Recommender):
    """Pure-SVD recommender fitted from a rating DataFrame instead of a matrix."""
    def __init__(self, max_u: int, max_v: int, num_factors: int) -> None:
        self.USER_factors = np.zeros((max_u, num_factors))
        self.ITEM_factors = np.zeros((max_v, num_factors))
        self.max_u = max_u
        self.max_v = max_v
        self.num_factors = num_factors
    def fit(self, df: pd.DataFrame) -> None:
        """Build the sparse rating matrix, then run truncated randomized SVD."""
        mat = sp.csr_matrix(
            (df.rating, (df.uidx, df.iidx)), shape=(self.max_u, self.max_v))
        U, Sigma, VT = randomized_svd(mat,
                                      n_components=self.num_factors,
                                      random_state=None)
        # Fold the singular values into the item factors.
        self.USER_factors = U
        self.ITEM_factors = (sp.diags(Sigma) * VT).T
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class ContextItemKnn(Recommender):
    """Item-content model: each user is the sum of embeddings of liked items."""
    def __init__(self, max_u: int, max_v: int, item_embed: np.ndarray) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.ITEM_factors = item_embed
        self.USER_factors = np.zeros((max_u, item_embed.shape[1]))
    def fit(self, df: pd.DataFrame) -> None:
        """Accumulate item embeddings into user profiles for positive ratings."""
        for uidx, iidx, rating in zip(df.uidx, df.iidx, df.rating):
            if rating > 0:
                self.USER_factors[uidx, :] += self.ITEM_factors[iidx, :]
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class BPRRecommender(Recommender):
    """Recommender trained with the BPR pairwise ranking loss over sampled negatives."""
    def __init__(self, max_u: int, max_v: int,
                 factor_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1):
        # expo_* mirror ClassRecommender's exposure parameters; the BPR loss
        # below does not use them beyond moving expo_factor to the device.
        self.max_u = max_u
        self.max_v = max_v
        self.factor_model = factor_model
        self.expo_factor = expo_factor
        self.expo_thresh = expo_thresh
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)
    def fit(self,
            train_df: pd.DataFrame,
            test_df: Optional[pd.DataFrame] = None,
            rating_factor: Optional[nn.Module] = None,
            expo_model: Optional[Recommender] = None,
            past_hist: Optional[Dict[int, Set[int]]] = None,
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            delta: float = 10,
            cuda: Optional[int] = None) -> None:
        """Train factor_model with BPR; optionally run unbiased_eval each epoch."""
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        model = self.factor_model
        model.to(device)
        if self.expo_factor is not None:
            self.expo_factor.to(device)
            self.expo_factor.eval()
        u, v = train_df.uidx.tolist(), train_df.iidx.tolist()
        optimizer = build_optimizer(lr, model)

        # Clamped sigmoid kept for parity with ClassRecommender (unused below).
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        # Per-user chronological item lists.
        hist = train_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        # max_len=1 with allow_empty: sequence machinery used only for
        # its negative sampling; the history tensor itself is ignored.
        seq_data = NegSequenceData(
            hist,
            1,
            item_num=self.max_v,
            padding_idx=self.max_v,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=True)
        data_loader = data.DataLoader(
            seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_record = []
            for user, item_i, item_j_list, item_hist in data_loader:
                optimizer.zero_grad()
                model.zero_grad()
                # transfer to gpu
                bsz = item_hist.shape[0]
                user = user.to(device).long()  # [B]
                item_i = item_i.to(device).long()  # [B]
                item_j_list = item_j_list.to(device).long()  # [B, num_neg]
                #item_hist = item_hist.to(device).long() # [B, max_len]
                # reshape: broadcast the positive against every negative
                item_i_list = item_i.view(-1, 1).repeat(1, num_neg)  # [B, num_neg]
                users = user.unsqueeze(1).repeat(
                    1, num_neg)  # [B, num_neg]
                prediction_i = model(users, item_i_list)  # [B, num_neg]
                prediction_j = model(
                    users, item_j_list)  # [B, num_neg]
                # BPR loss: -log sigmoid(pos_score - neg_score).
                g_loss = -(prediction_i - prediction_j).sigmoid().log()
                g_loss = g_loss.mean()
                l2_loss = decay * model.get_l2(users, item_i_list)
                l2_loss += decay * model.get_l2(users, item_j_list)
                target = g_loss + l2_loss
                target.backward()
                optimizer.step()
                loss_record.append(
                    (target.item(), g_loss.item(), l2_loss.item()))
            loss_np = np.array(loss_record)
            #self.logger.debug(
            #    f'target: {np.mean(loss_np[:, 0]):.5f},loss: {np.mean(loss_np[:, 1]):.5f}, l2: {np.mean(loss_np[:, 2]):.5f}')
            if test_df is not None:
                # Optional per-epoch unbiased evaluation on the test split.
                model.eval()
                rating_model = None
                if rating_factor is not None:
                    rating_model = ClassRecommender(
                        self.max_u, self.max_v, rating_factor)
                unbiased_eval(self.max_u, self.max_v, test_df, self,
                              rel_model=rating_model,
                              cut_len=10,
                              expo_model=expo_model,
                              past_hist=past_hist)
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy scores for aligned user/item id lists (no gradients tracked)."""
        with torch.no_grad():
            device = self.factor_model.get_device()
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            # NOTE(review): the two .to(device) calls below are no-ops
            # (results discarded); the tensors above are already on device.
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class ClassRecommender(Recommender):
    """Pointwise classifier recommender with optional inverse-propensity weighting."""
    def __init__(self, max_u: int, max_v: int,
                 factor_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1) -> None:
        # expo_factor models exposure propensity; when set, the training
        # loss is divided by the (clipped) exposure probability.
        self.max_u = max_u
        self.max_v = max_v
        self.factor_model = factor_model
        self.expo_factor = expo_factor
        self.expo_thresh = expo_thresh
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)
    def fit(self,
            train_df: pd.DataFrame,
            test_df: Optional[pd.DataFrame] = None,
            rating_factor: Optional[nn.Module] = None,
            expo_model: Optional[Recommender] = None,
            past_hist: Optional[Dict[int, Set[int]]] = None,
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            delta: float = 10,
            cuda: Optional[int] = None) -> None:
        """Train factor_model with (optionally IPS-weighted) cross entropy."""
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        model = self.factor_model
        model.to(device)
        if self.expo_factor is not None:
            self.expo_factor.to(device)
            self.expo_factor.eval()
        #u, v = train_df.uidx.tolist(), train_df.iidx.tolist()
        optimizer = build_optimizer(lr, model)

        # Clamped sigmoid keeps log() numerically safe in the loss below.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        # Per-user chronological item lists.
        hist = train_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        # max_len=1 with allow_empty: sequence machinery used only for
        # its negative sampling; the history tensor itself is ignored.
        seq_data = NegSequenceData(
            hist,
            1,
            item_num=self.max_v,
            padding_idx=self.max_v,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=True)
        data_loader = data.DataLoader(
            seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_record = []
            for user, item_i, item_j_list, item_hist in data_loader:
                optimizer.zero_grad()
                model.zero_grad()
                # transfer to gpu
                bsz = item_hist.shape[0]
                user = user.to(device).long()  # [B]
                item_i = item_i.to(device).long()  # [B]
                item_j_list = item_j_list.to(device).long()  # [B, num_neg]
                #item_hist = item_hist.to(device).long() # [B, max_len]
                # reshape: column 0 is the positive, the rest are negatives
                item_i = item_i.view(-1, 1)  # [B, 1]
                items = torch.cat([item_i, item_j_list],
                                  dim=1)  # [B, 1 + num_neg]
                # Label 1 for the positive column, 0 for negatives.
                labels = (torch.arange(1 + num_neg).to(device)
                          < 1).float().repeat(bsz).view(bsz, -1)  # [B, 1 + num_neg]
                users = user.unsqueeze(1).repeat(
                    1, 1 + num_neg)  # [B, 1 + num_neg]
                g_s = model(users, items)
                g_prob = act_func(g_s)
                if self.expo_factor is not None:
                    # IPS-weighted cross entropy: divide by the exposure
                    # probability, compounded and clipped below at
                    # expo_thresh to bound the weights.
                    expo_score = self.expo_factor(users, items)
                    expo_prob = act_func(expo_score) ** self.expo_compound
                    expo_prob = torch.clamp(expo_prob, min=self.expo_thresh)
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob)) / expo_prob
                else:
                    # Plain binary cross entropy.
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                g_loss = g_loss.mean()
                l2_loss = decay * model.get_l2(user, items)
                target = g_loss + l2_loss
                target.backward()
                optimizer.step()
                loss_record.append(
                    (target.item(), g_loss.item(), l2_loss.item()))
            loss_np = np.array(loss_record)
            #self.logger.debug(
            #    f'target: {np.mean(loss_np[:, 0]):.5f},loss: {np.mean(loss_np[:, 1]):.5f}, l2: {np.mean(loss_np[:, 2]):.5f}')
            if test_df is not None:
                # Optional per-epoch unbiased evaluation on the test split.
                model.eval()
                rating_model = None
                if rating_factor is not None:
                    rating_model = ClassRecommender(
                        self.max_u, self.max_v, rating_factor)
                unbiased_eval(self.max_u, self.max_v, test_df, self,
                              rel_model=rating_model,
                              cut_len=10,
                              expo_model=expo_model,
                              past_hist=past_hist)
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy scores for aligned user/item id lists (no gradients tracked)."""
        with torch.no_grad():
            device = self.factor_model.get_device()
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            # NOTE(review): the two .to(device) calls below are no-ops
            # (results discarded); the tensors above are already on device.
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class RatingEstimator(Recommender):
    """Explicit-rating regressor trained with MSE over (user, item, rating) triples."""
    def __init__(self, max_u: int, max_v: int, factor_model: nn.Module):
        self.max_u = max_u
        self.max_v = max_v
        self.factor_model = factor_model
    def fit(self,
            features: List[Tuple[int, int, float]],
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            cuda: Optional[int] = None) -> None:
        """Minimise MSE between predicted and observed ratings.

        num_neg and lambda_ are accepted for interface parity but unused.
        """
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        rating_data = RatingData(features)
        train_loader = torch.utils.data.DataLoader(
            rating_data, batch_size=batch_size, shuffle=True, num_workers=2)
        model = self.factor_model
        model.to(device)
        # minimizer: SparseAdam for embedding tables, Adam for dense layers
        sp_minimizer = torch.optim.SparseAdam(
            params=model.get_sparse_weight(), lr=lr)
        ds_minimizer = torch.optim.Adam(params=model.get_dense_weight(), lr=lr)
        optimizer = MultipleOptimizer(sp_minimizer, ds_minimizer)
        loss_func = torch.nn.MSELoss()
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_metric = []
            for user, item, rating in train_loader:
                optimizer.zero_grad()
                model.zero_grad()
                user = user.to(device).long()
                item = item.to(device).long()
                rating = rating.to(device).float()
                pred_rating = model(user, item)
                loss = loss_func(pred_rating, rating)
                # Embedding L2 penalty scaled by decay.
                l2_loss = decay * model.get_l2(user, item)
                target = loss + l2_loss
                target.backward()
                optimizer.step()
                loss_metric.append(loss.item())
    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Predicted ratings for aligned user/item id lists (no gradients)."""
        with torch.no_grad():
            device = self.factor_model.embed_item.weight.device
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            # NOTE(review): the two .to(device) calls below are no-ops
            # (results discarded); the tensors above are already on device.
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class DeepRecommender(Recommender):
    def __init__(self, max_u: int, max_v: int,
                 seq_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1,
                 expo_isdeep: bool = False):
        # seq_model scores target items against a user's item history;
        # expo_factor (optional) is the exposure model, and expo_isdeep
        # tells fit() whether it takes (items, hist) or (users, items).
        self.max_u = max_u
        self.max_v = max_v
        self.seq_model = seq_model
        # Window length and padding id are delegated to the sequence model.
        self.max_len = self.seq_model.max_len
        self.padding_idx = self.seq_model.padding_idx
        self.expo_factor = expo_factor
        self.expo_thresh = expo_thresh
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)
        # Per-user chronological item lists, populated via set_user_record().
        self.user_records = None
        self.expo_isdeep = expo_isdeep
    def set_user_record(self, user_record: Dict[int, List[int]]):
        """Cache the per-user chronological item lists (built during fit())."""
        self.user_records = user_record
def fit(self,
train_df: pd.DataFrame,
test_df: Optional[pd.DataFrame] = None,
rating_factor: Optional[nn.Module] = None,
expo_model: Optional[Recommender] = None,
past_hist: Optional[Dict[int, Set[int]]] = None,
lr: float = 0.01,
batch_size: int = 2048,
num_neg: int = 1,
num_epochs: int = 50,
lambda_: float = 0.001,
decay: float = 0.0,
delta: float = 10,
window: bool = True,
cuda: Optional[int] = None) -> None:
if cuda is None:
device = torch.device('cpu')
else:
device = torch.device(f'cuda:{cuda}')
model = self.seq_model
model.to(device)
if self.expo_factor is not None:
self.expo_factor.to(device)
self.expo_factor.eval()
optimizer = build_optimizer(lr, model)
def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
hist = train_df.groupby('uidx').apply(
lambda x: list(zip(x.ts, x.iidx))).to_dict()
for k in hist.keys():
hist[k] = [x[1] for x in sorted(hist[k])]
self.set_user_record(hist)
seq_data = NegSequenceData(
hist, self.max_len,
item_num=self.max_v,
padding_idx=self.padding_idx,
num_neg=num_neg,
window=window,
past_hist=past_hist)
train_loader = data.DataLoader(
seq_data,
batch_size=batch_size,
shuffle=True,
num_workers=3,
pin_memory=True)
for epoch in tqdm(range(num_epochs)):
model.train()
loss_record = []
for user, item_i, item_j_list, item_hist in train_loader:
optimizer.zero_grad()
model.zero_grad()
bsz = item_hist.shape[0]
user = user.to(device).long()
item_i = item_i.to(device).long()
item_j_list = item_j_list.to(device).long()
item_hist = item_hist.to(device).long()
item_i = item_i.view(-1, 1) # [B, 1]
items = torch.cat([item_i, item_j_list],
dim=1) # [B, 1 + num_neg]
labels = (torch.arange(1 + num_neg).to(device)
< 1).float().repeat(bsz).view(bsz, -1) # [B, 1 + num_neg]
users = user.unsqueeze(1).repeat(
1, 1 + num_neg) # [B, 1 + num_neg]
g_s = model(items, item_hist)
g_prob = act_func(g_s)
if self.expo_factor is not None:
if self.expo_isdeep:
expo_score = self.expo_factor(items, item_hist)
else:
expo_score = self.expo_factor(users, items)
expo_prob = act_func(expo_score) ** self.expo_compound
expo_prob = torch.clamp(expo_prob, min=self.expo_thresh)
g_loss = -1 * (labels * torch.log(g_prob) +
(1 - labels) * torch.log(1 - g_prob)) / expo_prob
else:
g_loss = -1 * (labels * torch.log(g_prob) +
(1 - labels) * torch.log(1 - g_prob))
g_loss = g_loss.mean()
l2_loss = decay * g_loss * 0 # model.get_l2(user, items)
target = g_loss + l2_loss
target.backward()
optimizer.step()
loss_record.append(
(target.item(), g_loss.item(), l2_loss.item()))
loss_np = np.array(loss_record)
#self.logger.debug(
# f'target: {np.mean(loss_np[:, 0]):.5f},loss: {np.mean(loss_np[:, 1]):.5f}, l2: {np.mean(loss_np[:, 2]):.5f}')
if test_df is not None:
model.eval()
unbiased_eval(self.max_u, self.max_v, test_df, self,
rel_model=None,
cut_len=10,
expo_model=None,
past_hist=past_hist)
def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
assert(self.user_records is not None)
temp_hist = np.zeros(self.max_len, dtype=int) + self.padding_idx
item_hist = self.user_records[u_b[0]]
if len(item_hist) == 0:
return np.zeros(len(v_b))
temp_hist[-len(item_hist):] = item_hist[-self.max_len:]
temp_hist = temp_hist.reshape(1, -1)
with torch.no_grad():
device = self.seq_model.get_device()
self.seq_model.eval()
v_b_t = torch.LongTensor(v_b).to(device) # [num_item]
v_b_t = v_b_t.view(1, -1) # [1, num_item]
temp_hist = torch.from_numpy(temp_hist).to(device) # [1, max_len]
scores = self.seq_model(v_b_t, temp_hist).flatten()
return scores.cpu().numpy()
def unbiased_eval(num_user: int, num_item: int, dat_df: pd.DataFrame,
                  recom: Recommender, rel_model: Optional[Recommender] = None,
                  expo_model: Optional[Recommender] = None,
                  past_hist: Optional[Dict[int, Set[int]]] = None, expo_compound: float = 1.0,
                  epsilon: float = 1.0, num_neg: int = 100, cut_len: int = 10, seed: int = 886):
    """Evaluate a recommender with sampled-negative Recall@k / NDCG@k.

    For every (user, positive item) row in ``dat_df``, ``num_neg`` negatives
    are sampled, all candidates are scored by ``recom``, and the positive's
    rank contributes to recall/NDCG — optionally de-biased by a relevance
    model (``rel_model``) and an exposure model (``expo_model``).

    Parameters:
    ----------
    num_user, num_item : int
        Cardinalities of the user / item spaces.
    dat_df : pd.DataFrame
        Evaluation rows with ``uidx`` and ``iidx`` columns.
    recom : Recommender
        Model under evaluation (must expose ``score``).
    rel_model, expo_model : Recommender, optional
        Optional relevance / exposure models for unbiased weighting.
    past_hist : dict, optional
        uidx -> set of previously seen items; negatives are resampled so
        they are unseen and differ from the positive.
    expo_compound, epsilon : float
        Exponent for exposure propensity / shift for relevance sigmoid.
    num_neg, cut_len : int
        Number of sampled negatives / ranking cutoff k.
    seed : int
        Fixed seed so comparisons between models share negative samples.

    Returns:
    -------
    float
        Recall@cut_len over all evaluation rows.
    """
    logger = logging.getLogger(__name__)
    # this is to make sure comparision between models is fair yet not affect the negative sampling's variation
    prng = RandomState(seed)
    row, col = dat_df.uidx, dat_df.iidx
    def sigmoid(x): return np.exp(x) / (1 + np.exp(x))
    recall_cnt = 0
    ndcg_sum = 0
    for u, i in list(zip(row, col)):
        neg = prng.randint(0, num_item, num_neg)
        if past_hist is None:
            neg = neg[neg != i]
        else:
            # BUGFIX: resample while a candidate is INVALID (already seen by
            # the user, or equal to the positive). The previous condition was
            # inverted ("not in ... and !="), so the inner while-loop never
            # executed and invalid negatives were silently kept.
            for idx in range(num_neg):
                while int(neg[idx]) in past_hist.get(u, []) or i == neg[idx]:
                    neg[idx] = prng.randint(0, num_item)
        item_list: List[int] = neg.tolist()
        item_list.append(i)
        user_list = [u] * len(item_list)
        scores = recom.score(user_list, item_list)
        if rel_model is not None:
            rel_score = rel_model.score(user_list, item_list)
            rel_prob = sigmoid(rel_score - epsilon)
        else:
            rel_prob = np.ones(len(scores))
        expo_score = 1
        if expo_model is not None:
            expo_score = sigmoid(expo_model.score([u], [i])[0]) ** expo_compound
        # Rank candidates by descending score.
        rank = scores.argsort()[::-1]
        item_npy = np.array(item_list)
        top_items = item_npy[rank][:cut_len]
        top_item_rel_prob = rel_prob[rank][:cut_len]
        #recall_cnt += int(i in top_items)
        for pos, (top_i, top_rel) in enumerate(
                zip(top_items, top_item_rel_prob)):
            if i == top_i:
                # Weight the hit by relevance / exposure propensity.
                recall_cnt += (top_rel / expo_score)
                ndcg_sum += np.log(2) / np.log(2 + pos) * \
                    (top_rel / expo_score)
    logger.info(
        f'Recall@{cut_len} = {recall_cnt / len(row):.5f}; NDCG@{cut_len} = {ndcg_sum / len(row):.5f}')
    return recall_cnt / len(row)
def ac_train_v2(f_model: torch.nn.Module,
                g_model: torch.nn.Module,
                beta_model: torch.nn.Module,
                tr_df: pd.DataFrame,
                user_num: int,
                item_num: int,
                val_df: Optional[pd.DataFrame] = None,
                rating_model: Optional[Recommender] = None,
                expo_model: Optional[Recommender] = None,
                past_hist: Optional[Dict[int, Set[int]]] = None,
                num_epochs: int = 50,
                batch_size: int = 2048,
                min_prob: float = 0.1,
                num_neg: int = 1,
                cuda_idx: int = 0,
                min_delta: float = 0.1,
                lr: float = 0.01,
                f_round_ahead: int = 1,
                g_round_ahead: int = 1,
                decay: float = 0.0):
    """Adversarial counterfactual training (v2), matrix-factorization style.

    Alternates a minimization step over ``f_model`` + ``beta_model`` (the
    target model and the propensity/weighting model) with a maximization
    step over ``g_model`` (the adversary), using per-epoch resampled
    negatives. Requires CUDA (all models are moved with ``.cuda()``).

    NOTE(review): ``f_round_ahead`` is accepted but `train_epoch` always
    loops ``g_round_ahead`` times regardless of flag — confirm intent.
    """
    logger = logging.getLogger(__name__)
    with torch.cuda.device(cuda_idx):
        f_recommender = ClassRecommender(user_num, item_num, f_model)
        g_recommender = ClassRecommender(user_num, item_num, g_model)
        u, v = tr_df.uidx.tolist(), tr_df.iidx.tolist()
        # f and beta are trained jointly; g is trained adversarially.
        minimizer = build_optimizer(lr, f_model, beta_model)
        maximizer = build_optimizer(lr, g_model)
        loss_func = torch.nn.BCELoss(reduction='none')
        # Clamp logits before sigmoid so later log() calls stay finite.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        #device_cuda = torch.device(f'cuda:{cuda_idx}')
        f_model.cuda()
        g_model.cuda()
        beta_model.cuda()
        def train_epoch(optimizer, data_loader, flag='g_train'):
            # One pass over data_loader, stepping only `optimizer`;
            # `flag` picks which objective (f-minimize vs g-maximize).
            f_loss_record, g_loss_record = [], []
            # train the g_model for one epoch
            for c_round in range(g_round_ahead):
                for user, item_pos, item_neg_list in data_loader:
                    f_model.zero_grad()
                    g_model.zero_grad()
                    beta_model.zero_grad()
                    optimizer.zero_grad()
                    f_model.train()
                    g_model.train()
                    beta_model.train()
                    user = user.long().cuda()
                    item_pos = item_pos.long().cuda()
                    item_neg_list = item_neg_list.cuda().long()
                    item_neg = item_neg_list.flatten()
                    # Repeat each user once per sampled negative.
                    user_for_neg = user.reshape(
                        1, -1).repeat(num_neg, 1).t().flatten()
                    user = torch.cat([user, user_for_neg], dim=0).long()
                    items = torch.cat([item_pos, item_neg], dim=0).long()
                    labels = torch.cat([torch.ones(len(item_pos)).cuda(
                    ), torch.zeros(len(item_neg)).cuda()], dim=0).float()
                    f_s = f_model(user, items)
                    g_s = g_model(user, items)
                    # q (propensity) conditions on g's score and the label.
                    q_s = beta_model(user, items, g_s, labels)
                    f_prob = torch.clamp(act_func(f_s), min=0.01, max=1)
                    g_prob = torch.clamp(act_func(g_s), min=0.01, max=1)
                    q_prob = torch.clamp(act_func(q_s), min=min_prob, max=1)
                    # f's BCE is inverse-propensity weighted by q.
                    f_loss = -1 * (labels * torch.log(f_prob) +
                                   (1 - labels) * torch.log(1 - f_prob)) / q_prob
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                    if flag == 'g_train':
                        target = (
                            torch.clamp(
                                min_delta + g_loss - f_loss,
                                min=0)).mean() # g wants to maximize the gap
                        target += decay * g_model.get_l2(user, items)
                        target.backward()
                    elif flag == 'f_train':
                        target = f_loss.mean()
                        target += decay * \
                            f_model.get_l2(user, items) + decay * \
                            beta_model.get_l2(user, items)
                        target.backward()
                    else:
                        raise ValueError('use g_train or f_train')
                    optimizer.step()
                    with torch.no_grad():
                        f_loss = f_loss.mean()
                        g_loss = g_loss.mean()
                        f_loss_record.append(f_loss.item())
                        g_loss_record.append(g_loss.item())
                logger.info(
                    f'{flag} at {c_round} round -- f_loss: {np.mean(f_loss_record)} g_loss: {np.mean(g_loss_record)}')
        # pre-fit the g without adjusting
        g_recommender.fit(tr_df,
                          num_epochs=0,
                          cuda=cuda_idx,
                          decay=decay)
        neg_data = NegSeqData(list(zip(u, v)), item_num,
                              num_neg=num_neg, past_hist=past_hist)
        neg_data.is_training = True
        for epoch in range(num_epochs):
            # Fresh negative samples every epoch.
            neg_data.ng_sample()
            data_loader = data.DataLoader(
                neg_data,
                batch_size=batch_size,
                shuffle=True,
                num_workers=2,
                pin_memory=True)
            logger.info(f'Epoch -- {epoch}')
            minimizer.zero_grad()
            maximizer.zero_grad()
            train_epoch(minimizer, data_loader, 'f_train')
            train_epoch(maximizer, data_loader, 'g_train')
            if val_df is not None:
                logger.info('f_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    f_recommender,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist)
                logger.info('g_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    g_recommender,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist)
def ac_train_v3(f_model: torch.nn.Module,
                is_f_seq: bool,
                g_model: torch.nn.Module,
                is_g_seq: bool,
                beta_model: torch.nn.Module,
                tr_df: pd.DataFrame,
                user_num: int,
                item_num: int,
                val_df: Optional[pd.DataFrame] = None,
                rating_model: Optional[Recommender] = None,
                expo_model: Optional[Recommender] = None,
                past_hist: Optional[Dict[int, Set[int]]] = None,
                g_weight: float = 1.0,
                num_epochs: int = 50,
                batch_size: int = 2048,
                min_prob: float = 0.1,
                num_neg: int = 1,
                cuda_idx: int = 0,
                min_delta: float = 0.1,
                lr: float = 0.01,
                decay: float = 0.0,
                expo_compound: float = 1.0,
                epsilon: float = 1.0):
    """Adversarial counterfactual training (v3), sequence-model aware.

    Like ``ac_train_v2`` but either player (f / g) may be a sequential
    model (``is_f_seq`` / ``is_g_seq``), in which case it is fed
    (items, item_hist) instead of (users, items). Uses sequence-style
    negative sampling datasets and evaluates with the unbiased metrics
    (``epsilon`` / ``expo_compound`` are forwarded to ``unbiased_eval``).
    Requires CUDA.
    """
    logger = logging.getLogger(__name__)
    with torch.cuda.device(cuda_idx):
        if is_f_seq:
            f_recommender = DeepRecommender(user_num, item_num, f_model)
        else:
            f_recommender = ClassRecommender(user_num, item_num, f_model)
        if is_g_seq:
            g_recommender = DeepRecommender(user_num, item_num, g_model)
        else:
            g_recommender = ClassRecommender(user_num, item_num, g_model)
        # f and beta are minimized jointly; g is the adversary.
        minimizer = build_optimizer(lr, f_model, beta_model)
        maximizer = build_optimizer(lr, g_model)
        loss_func = torch.nn.BCELoss(reduction='none')
        # Clamp logits before sigmoid so later log() calls stay finite.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        #device_cuda = torch.device(f'cuda:{cuda_idx}')
        f_model.cuda()
        g_model.cuda()
        beta_model.cuda()
        def train_epoch(optimizer, data_loader, flag, is_f_seq, is_g_seq, round_repeat=1):
            # One (or round_repeat) pass(es) over data_loader, stepping only
            # `optimizer`; `flag` selects the objective.
            f_loss_record, g_loss_record = [], []
            q_prob_record = []
            # train the g_model for one epoch
            for c_round in range(round_repeat):
                for user, item_i, item_j_list, item_hist in data_loader:
                    f_model.zero_grad()
                    g_model.zero_grad()
                    beta_model.zero_grad()
                    optimizer.zero_grad()
                    f_model.train()
                    g_model.train()
                    beta_model.train()
                    # transfer to gpu
                    bsz = item_hist.shape[0]
                    user = user.cuda().long() # [B]
                    item_i = item_i.cuda().long() # [B]
                    item_j_list = item_j_list.cuda().long() # [B, num_neg]
                    item_hist = item_hist.cuda().long() # [B, max_len]
                    # reshape
                    item_i = item_i.view(-1, 1) # [B, 1]
                    items = torch.cat([item_i, item_j_list],
                                      dim=1) # [B, 1 + num_neg]
                    # Column 0 (the positive) is labelled 1, negatives 0.
                    labels = (torch.arange(1 + num_neg).cuda()
                              < 1).float().repeat(bsz).view(bsz, -1) # [B, 1 + num_neg]
                    users = user.unsqueeze(1).repeat(
                        1, 1 + num_neg) # [B, 1 + num_neg]
                    # Sequence models take (items, history); MF models take
                    # (users, items).
                    f_s = f_model(items, item_hist) if is_f_seq else f_model(
                        users, items)
                    g_s = g_model(items, item_hist) if is_g_seq else g_model(
                        users, items)
                    q_s = beta_model(users, items, g_s, labels)
                    f_prob = torch.clamp(act_func(f_s), min=0.01, max=1)
                    g_prob = torch.clamp(act_func(g_s), min=0.01, max=1)
                    q_prob = torch.clamp(act_func(q_s), min=min_prob, max=1)
                    # f's BCE is inverse-propensity weighted by q.
                    f_loss = -1 * (labels * torch.log(f_prob) +
                                   (1 - labels) * torch.log(1 - f_prob)) / q_prob
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                    if flag == 'g_train':
                        target = (
                            torch.clamp(
                                min_delta + g_weight * g_loss - f_loss,
                                min=0)).mean() # g wants to maximize the gap
                        target += decay * g_model.get_l2(user, items)
                        target.backward()
                    elif flag == 'f_train':
                        target = f_loss.mean()
                        target += decay * \
                            f_model.get_l2(user, items) + decay * \
                            beta_model.get_l2(user, items)
                        target.backward()
                    else:
                        raise ValueError('use g_train or f_train')
                    optimizer.step()
                    with torch.no_grad():
                        f_loss = f_loss.mean()
                        g_loss = g_loss.mean()
                        f_loss_record.append(f_loss.item())
                        g_loss_record.append(g_loss.item())
                        q_prob_record.append(q_prob.mean().item())
                logger.info(
                    f'{flag} at {c_round} round -- f_loss: {np.mean(f_loss_record)} g_loss: {np.mean(g_loss_record)}, q_prob: {np.mean(q_prob_record)}')
        # Build per-user histories ordered by timestamp.
        hist = tr_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        if is_f_seq:
            f_recommender.set_user_record(hist)
        if is_g_seq:
            g_recommender.set_user_record(hist)
        padding_idx = item_num + 1
        max_len = 1
        if is_f_seq:
            max_len = f_model.max_len
        elif is_g_seq:
            max_len = g_model.max_len
        # Separate datasets so each player can allow empty histories when it
        # is not a sequence model.
        f_seq_data = NegSequenceData(
            hist,
            max_len,
            item_num=item_num,
            padding_idx=padding_idx,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=not is_f_seq)
        f_train_loader = data.DataLoader(
            f_seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        g_seq_data = NegSequenceData(
            hist,
            max_len,
            item_num=item_num,
            padding_idx=padding_idx,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=not is_g_seq)
        g_train_loader = data.DataLoader(
            g_seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in range(num_epochs):
            logger.info(f'Epoch -- {epoch}')
            minimizer.zero_grad()
            maximizer.zero_grad()
            train_epoch(minimizer, f_train_loader,
                        'f_train', is_f_seq, is_g_seq)
            train_epoch(maximizer, g_train_loader,
                        'g_train', is_f_seq, is_g_seq)
            logger.info(f'beta_model: {beta_model.alpha.item()}, {beta_model.beta.item()}, {beta_model.label_coef.item()}')
            if val_df is not None:
                logger.info('f_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    f_recommender,
                    epsilon=epsilon,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist,
                    expo_compound=expo_compound)
                logger.info('g_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    g_recommender,
                    epsilon=epsilon,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist,
                    expo_compound=expo_compound)
| 42,175 | 38.306617 | 152 | py |
imgclsmob | imgclsmob-master/eval_ke.py | """
Script for evaluating trained model on Keras (validate/test).
"""
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (Keras)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flags, keyword arguments) for every option, kept in declaration order
    # so the generated --help output is unchanged.
    option_specs = [
        (("--rec-train",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/train.rec",
              help="the training data")),
        (("--rec-train-idx",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/train.idx",
              help="the index of training data")),
        (("--rec-val",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/val.rec",
              help="the validation data")),
        (("--rec-val-idx",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/val.idx",
              help="the index of validation data")),
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--dtype",),
         dict(type=str, default="float32", help="data type for training")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--input-size",),
         dict(type=int, default=224, help="size of the input for model")),
        (("--resize-inv-factor",),
         dict(type=float, default=0.875,
              help="inverted ratio for input image crop")),
        (("--num-gpus",),
         dict(type=int, default=0, help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log", help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="keras, mxnet, tensorflow, tensorflow-gpu",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="keras, keras-mxnet, mxnet, mxnet-cu110",
              help="list of pip packages for logging")),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def test(net,
         val_gen,
         val_size,
         batch_size,
         num_gpus,
         calc_weight_count=False,
         extended_log=False):
    """
    Main test routine.
    Parameters:
    ----------
    net : Model
        Model.
    val_gen : generator
        Data loader.
    val_size : int
        Size of validation subset.
    batch_size : int
        Batch size.
    num_gpus : int
        Number of used GPUs.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    # Inference mode (disables dropout/BN-update behaviour in Keras layers).
    keras.backend.set_learning_phase(0)
    # Compilation is only needed so evaluate_generator can compute metrics;
    # the optimizer settings are never used for training here.
    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=keras.optimizers.SGD(
            lr=0.01,
            momentum=0.0,
            decay=0.0,
            nesterov=False),
        metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)
    # net.summary()
    tic = time.time()
    score = net.evaluate_generator(
        generator=val_gen,
        steps=(val_size // batch_size),
        verbose=True)
    # score = [loss, top1-accuracy, top5-accuracy] -> report as error rates.
    err_top1_val = 1.0 - score[1]
    err_top5_val = 1.0 - score[2]
    if calc_weight_count:
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        logging.info("Model: {} trainable parameters".format(weight_count))
    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
def main():
    """
    Main body of script.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    # Effective batch size may be rescaled for multi-GPU execution.
    batch_size = prepare_ke_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())
    # Fall back to ImageNet-1k defaults when the model lacks metadata.
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        only_val=True)
    val_gen = get_data_generator(
        data_iterator=val_data,
        num_classes=num_classes)
    # ImageNet-1k validation subset size.
    val_size = 50000
    # Weights must come from somewhere: either pretrained or a checkpoint.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_gen=val_gen,
        val_size=val_size,
        batch_size=batch_size,
        num_gpus=args.num_gpus,
        calc_weight_count=True,
        extended_log=True)
if __name__ == "__main__":
    main()
| 6,665 | 27.365957 | 118 | py |
imgclsmob | imgclsmob-master/load_model.py | """
Script for downloading model weights.
"""
import argparse
import numpy as np
def parse_args():
    """Parse command-line options for the weight-download script."""
    arg_parser = argparse.ArgumentParser(
        description="Download model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Only one option: the model name whose weights should be fetched.
    arg_parser.add_argument("--model", type=str, required=True, help="model name")
    return arg_parser.parse_args()
def main():
    """
    Main body of script: download the named model's pretrained weights for
    every supported framework (Gluon, PyTorch, Chainer, TensorFlow 2).
    """
    args = parse_args()
    # Imports are local so a missing framework only breaks its own step
    # once reached, not the whole script at import time.
    from gluon.utils import prepare_model as prepare_model_gl
    prepare_model_gl(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        dtype=np.float32)
    from pytorch.utils import prepare_model as prepare_model_pt
    prepare_model_pt(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        use_cuda=False)
    from chainer_.utils import prepare_model as prepare_model_ch
    prepare_model_ch(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="")
    from tensorflow2.utils import prepare_model as prepare_model_tf2
    prepare_model_tf2(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        use_cuda=False)
if __name__ == '__main__':
    main()
| 1,326 | 23.574074 | 92 | py |
imgclsmob | imgclsmob-master/eval_gl.py | """
Script for evaluating trained model on MXNet/Gluon (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model
from gluon.utils import calc_net_weight_count, validate
from gluon.utils import validate_asr
from gluon.utils import get_composite_metric
from gluon.utils import report_accuracy
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_batch_fn
from gluon.dataset_utils import get_val_data_source, get_test_data_source
from gluon.model_stats import measure_model
from gluon.gluoncv2.models.model_store import _model_sha1
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # (flags, keyword arguments) for each option, kept in declaration order
    # so the generated --help output is unchanged.
    option_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--dtype",),
         dict(type=str, default="float32", help="base data type for tensors")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters")),
        (("--calc-flops",),
         dict(dest="calc_flops", action="store_true", help="calculate FLOPs")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--data-subset",),
         dict(type=str, default="val",
              help="data subset. options are val and test")),
        (("--num-gpus",),
         dict(type=int, default=0, help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log", help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="mxnet, numpy",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="mxnet-cu110, mxnet-cu112",
              help="list of pip packages for logging")),
        (("--disable-cudnn-autotune",),
         dict(action="store_true",
              help="disable cudnn autotune for segmentation models")),
        (("--not-show-progress",),
         dict(action="store_true", help="do not show progress bar")),
        (("--all",),
         dict(action="store_true",
              help="test all pretrained models for partucular dataset")),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
def parse_args():
    """
    Create python script parameters (common part).
    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO, LibriSpeech")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Two-phase parsing: first read only --dataset/--work-dir so the
    # dataset-specific and eval-specific options can be registered before
    # the full parse below.
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=args.work_dir)
    add_eval_parser_arguments(parser)
    args = parser.parse_args()
    return args
def calc_model_accuracy(net,
                        test_data,
                        batch_fn,
                        data_source_needs_reset,
                        metric,
                        dtype,
                        ctx,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False,
                        ml_type="cls"):
    """
    Main test routine.
    Parameters:
    ----------
    net : HybridBlock
        Model.
    test_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    metric : EvalMetric
        Metric object instance.
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    ml_type : str, default 'cls'
        Machine learning type.
    Returns:
    -------
    list of floats
        Accuracy values.
    """
    if not calc_flops_only:
        # ASR models need a dedicated validation loop.
        validate_fn = validate_asr if ml_type == "asr" else validate
        # validate_fn = validate
        tic = time.time()
        validate_fn(
            metric=metric,
            net=net,
            val_data=test_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = metric.get()[1]
        # Normalize a scalar metric value to a one-element list
        # (idiom fix: isinstance instead of `type(...) == list`).
        acc_values = acc_values if isinstance(acc_values, list) else [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        # ASR input is a flattened feature tensor plus a length scalar.
        in_shapes = [(1, 640 * 25 * 5), (1,)] if ml_type == "asr" else\
            [(1, in_channels, input_image_size[0], input_image_size[1])]
        num_flops, num_macs, num_params = measure_model(
            model=net,
            in_shapes=in_shapes,
            ctx=ctx[0])
        # Sanity check: both counting paths must agree.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine.
    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    Returns:
    -------
    float
        Main accuracy value.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation test-set evaluation requires batch size 1 and disabled
    # cudnn autotune.
    assert (ds_metainfo.ml_type != "imgseg") or (args.data_subset != "test") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        do_hybridize=(ds_metainfo.allow_hybridize and (not args.calc_flops)),
        ctx=ctx)
    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size
    # Choose the val or test split loader, with matching metrics below.
    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    if not args.not_show_progress:
        from tqdm import tqdm
        test_data = tqdm(test_data)
    # Weights must come from somewhere unless we only count FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        metric=test_metric,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        ml_type=ds_metainfo.ml_type)
    # saver_acc_ind selects the metric used for model selection/reporting.
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        # Must be set before MXNet initializes cuDNN.
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        # Regression-check every pretrained model of the zoo against the
        # error rate recorded in the model store.
        args.use_pretrained = True
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # Stored error is in units of 1e-4; allow 2e-4 tolerance.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)
if __name__ == "__main__":
    main()
| 11,941 | 31.53951 | 117 | py |
imgclsmob | imgclsmob-master/sotabench.py | from torchbench.image_classification import ImageNet
from pytorch.pytorchcv.models.model_store import _model_sha1
from pytorch.pytorchcv.model_provider import get_model as ptcv_get_model
import torchvision.transforms as transforms
import torch
import math
from sys import version_info
# import os
# Benchmark every pretrained model from the zoo registry on ImageNet via
# torchbench/sotabench. Model metainfo rows carry the evaluation settings.
for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
    net = ptcv_get_model(model_name, pretrained=True)
    error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
    # Skip non-ImageNet-1k models, models without a declared input size, and
    # entries whose remark ends with '*' (excluded from benchmarking).
    if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and (rem[-1] == "*")):
        continue
    paper_model_name = caption
    paper_arxiv_id = paper
    input_image_size = img_size
    resize_inv_factor = scale
    batch_size = batch
    model_description = "pytorch" + (rem if rem == "" else ", " + rem)
    # Sanity check: declared size must match the model's own metadata.
    assert (not hasattr(net, "in_size")) or (input_image_size == net.in_size[0])
    ImageNet.benchmark(
        model=net,
        model_description=model_description,
        paper_model_name=paper_model_name,
        paper_arxiv_id=paper_arxiv_id,
        # Standard ImageNet eval: resize by the inverse crop factor, center
        # crop, then normalize with the usual mean/std.
        input_transform=transforms.Compose([
            transforms.Resize(int(math.ceil(float(input_image_size) / resize_inv_factor))),
            transforms.CenterCrop(input_image_size),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
        batch_size=batch_size,
        num_gpu=1,
        # data_root=os.path.join("..", "imgclsmob_data", "imagenet")
    )
    # Free GPU memory before loading the next model.
    torch.cuda.empty_cache()
| 1,645 | 39.146341 | 109 | py |
imgclsmob | imgclsmob-master/train_tf2.py | """
Script for training model on TensorFlow 2.0.
"""
import os
import logging
import argparse
import numpy as np
import random
import tensorflow as tf
from common.logger_utils import initialize_logging
from tensorflow2.tf2cv.model_provider import get_model
from tensorflow2.dataset_utils import get_dataset_metainfo, get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser):
    """
    Register the command line options specific to classification training.

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    add_arg = parser.add_argument
    # Model selection and checkpoint handling.
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--use-pretrained", action="store_true",
            help="enable using pretrained model from github repo")
    add_arg("--resume", type=str, default="",
            help="resume from previously saved parameters if not None")
    add_arg("--resume-state", type=str, default="",
            help="resume from previously saved optimizer state if not None")
    # Hardware and data loading.
    add_arg("--num-gpus", type=int, default=0,
            help="number of gpus to use")
    add_arg("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
            help="number of preprocessing workers")
    add_arg("--batch-size", type=int, default=512,
            help="training batch size per device (CPU/GPU)")
    # Training schedule.
    add_arg("--num-epochs", type=int, default=120,
            help="number of training epochs.")
    add_arg("--start-epoch", type=int, default=1,
            help="starting epoch for resuming, default is 1 for new training")
    add_arg("--attempt", type=int, default=1,
            help="current attempt number for training")
    # Optimizer and learning rate policy.
    add_arg("--optimizer-name", type=str, default="nag",
            help="optimizer name")
    add_arg("--lr", type=float, default=0.1,
            help="learning rate")
    add_arg("--lr-mode", type=str, default="cosine",
            help="learning rate scheduler mode. options are step, poly and cosine")
    add_arg("--lr-decay", type=float, default=0.1,
            help="decay rate of learning rate")
    add_arg("--lr-decay-period", type=int, default=0,
            help="interval for periodic learning rate decays. default is 0 to disable")
    add_arg("--lr-decay-epoch", type=str, default="40,60",
            help="epoches at which learning rate decays")
    add_arg("--target-lr", type=float, default=1e-8,
            help="ending learning rate")
    add_arg("--momentum", type=float, default=0.9,
            help="momentum value for optimizer")
    add_arg("--wd", type=float, default=0.0001,
            help="weight decay rate")
    # Logging and checkpoint cadence.
    add_arg("--log-interval", type=int, default=50,
            help="number of batches to wait before logging")
    add_arg("--save-interval", type=int, default=4,
            help="saving parameters epoch interval, best model will always be saved")
    add_arg("--save-dir", type=str, default="",
            help="directory of saved models and log-files")
    add_arg("--logging-file-name", type=str, default="train.log",
            help="filename of training log")
    add_arg("--seed", type=int, default=-1,
            help="Random seed to be fixed")
    add_arg("--log-packages", type=str, default="tensorflow, tensorflow-gpu",
            help="list of python packages for logging")
    add_arg("--log-pip-packages", type=str, default="tensorflow, tensorflow-gpu",
            help="list of pip packages for logging")
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a model for image classification/segmentation (TensorFlow 2.0)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # First pass: only dataset/work-dir are needed to pick the dataset-specific
    # argument set; remaining options are registered afterwards.
    known_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=known_args.work_dir)
    add_train_cls_parser_arguments(arg_parser)
    # Second pass: parse the full, now-complete option set.
    return arg_parser.parse_args()
def init_rand(seed):
    """
    Seed the `random` and NumPy RNGs; draw a fresh seed when non-positive.

    Parameters:
    ----------
    seed : int
        Requested seed; values <= 0 mean "pick one at random".

    Returns:
    -------
    int
        The seed that was actually applied.
    """
    actual_seed = seed if seed > 0 else np.random.randint(10000)
    random.seed(actual_seed)
    np.random.seed(actual_seed)
    return actual_seed
def main():
    """
    Main body of script: build the network and data sources, then run a
    custom TensorFlow 2 training/validation loop with GradientTape.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    # log_file_exist is not used after initialization in this script.
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    # NHWC layout for all Keras ops.
    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)
    model = args.model
    net = get_model(model, data_format=data_format)
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    # NOTE(review): the CLI optimizer settings (--optimizer-name, --lr,
    # --momentum, lr schedule options) are not applied here -- Adam with
    # default hyper-parameters is used instead; confirm this is intended.
    optimizer = tf.keras.optimizers.Adam()
    train_loss = tf.keras.metrics.Mean(name="train_loss")
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
    test_loss = tf.keras.metrics.Mean(name="test_loss")
    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")
    # One optimization step; compiled to a graph by @tf.function.
    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            predictions = net(images)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, net.trainable_variables)
        optimizer.apply_gradients(zip(gradients, net.trainable_variables))
        train_loss(loss)
        train_accuracy(labels, predictions)
    # One forward/metric-update step on validation data.
    @tf.function
    def test_step(images, labels):
        predictions = net(images)
        t_loss = loss_object(labels, predictions)
        test_loss(t_loss)
        test_accuracy(labels, predictions)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation requires batch size 1 in this pipeline.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    # assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    batch_size = args.batch_size
    train_data, train_img_count = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        data_format=data_format)
    val_data, val_img_count = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        data_format=data_format)
    num_epochs = args.num_epochs
    for epoch in range(num_epochs):
        for images, labels in train_data:
            train_step(images, labels)
            # break
        for test_images, test_labels in val_data:
            test_step(test_images, test_labels)
            # break
        template = "Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}"
        logging.info(template.format(
            epoch + 1,
            train_loss.result(),
            train_accuracy.result() * 100,
            test_loss.result(),
            test_accuracy.result() * 100))
        # Metrics accumulate across batches; reset for the next epoch.
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()
if __name__ == "__main__":
    main()
| 8,479 | 28.041096 | 102 | py |
imgclsmob | imgclsmob-master/eval_pt.py | """
Script for evaluating trained model on PyTorch (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from pytorch.utils import prepare_pt_context, prepare_model
from pytorch.utils import calc_net_weight_count, validate
from pytorch.utils import get_composite_metric
from pytorch.utils import report_accuracy
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_val_data_source, get_test_data_source
from pytorch.model_stats import measure_model
from pytorch.pytorchcv.models.model_store import _model_sha1
def add_eval_cls_parser_arguments(parser):
    """
    Register the command line options specific to model evaluation.

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    add_arg = parser.add_argument
    # Model selection and checkpoint handling.
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--use-pretrained", action="store_true",
            help="enable using pretrained model from github repo")
    add_arg("--resume", type=str, default="",
            help="resume from previously saved parameters")
    # FLOPs estimation switches.
    add_arg("--calc-flops", dest="calc_flops", action="store_true",
            help="calculate FLOPs")
    add_arg("--calc-flops-only", dest="calc_flops_only", action="store_true",
            help="calculate FLOPs without quality estimation")
    add_arg("--remove-module", action="store_true",
            help="enable if stored model has module")
    add_arg("--data-subset", type=str, default="val",
            help="data subset. options are val and test")
    # Hardware and data loading.
    add_arg("--num-gpus", type=int, default=0,
            help="number of gpus to use")
    add_arg("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
            help="number of preprocessing workers")
    add_arg("--batch-size", type=int, default=512,
            help="training batch size per device (CPU/GPU)")
    # Logging.
    add_arg("--save-dir", type=str, default="",
            help="directory of saved models and log-files")
    add_arg("--logging-file-name", type=str, default="train.log",
            help="filename of training log")
    add_arg("--log-packages", type=str, default="torch, torchvision",
            help="list of python packages for logging")
    add_arg("--log-pip-packages", type=str, default="",
            help="list of pip packages for logging")
    # Misc behaviour flags.
    add_arg("--disable-cudnn-autotune", action="store_true",
            help="disable cudnn autotune for segmentation models")
    add_arg("--show-progress", action="store_true",
            help="show progress bar")
    add_arg("--all", action="store_true",
            help="test all pretrained models for partucular dataset")
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (PyTorch)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, ADE20K, Cityscapes, "
             "COCO, LibriSpeech, MCV")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # First pass picks the dataset so its specific options can be registered.
    known_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=known_args.work_dir)
    add_eval_cls_parser_arguments(arg_parser)
    # Second pass parses the full option set.
    return arg_parser.parse_args()
def prepare_dataset_metainfo(args):
    """
    Resolve and configure the dataset metainfo from script arguments.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.

    Returns:
    -------
    DatasetMetaInfo
        Dataset metainfo.
    """
    metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    metainfo.update(args=args)
    # Image segmentation requires batch size 1 and disabled cudnn autotune.
    if metainfo.ml_type == "imgseg":
        assert (args.batch_size == 1)
        assert args.disable_cudnn_autotune
    return metainfo
def prepare_data_source(ds_metainfo,
                        data_subset,
                        batch_size,
                        num_workers):
    """
    Prepare a data loader for the requested subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    data_subset : str
        Data subset, either 'val' or 'test'.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers.

    Returns:
    -------
    DataLoader
        Data source.
    """
    assert (data_subset in ("val", "test"))
    # Dispatch table instead of an if/else chain.
    factories = {
        "val": get_val_data_source,
        "test": get_test_data_source,
    }
    factory = factories[data_subset]
    return factory(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=num_workers)
def prepare_metric(ds_metainfo,
                   data_subset):
    """
    Build the composite metric configured for the requested subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    data_subset : str
        Data subset, either 'val' or 'test'.

    Returns:
    -------
    CompositeEvalMetric
        Metric object instance.
    """
    assert (data_subset in ("val", "test"))
    if data_subset == "val":
        names = ds_metainfo.val_metric_names
        extra_kwargs = ds_metainfo.val_metric_extra_kwargs
    else:
        names = ds_metainfo.test_metric_names
        extra_kwargs = ds_metainfo.test_metric_extra_kwargs
    return get_composite_metric(
        metric_names=names,
        metric_extra_kwargs=extra_kwargs)
def update_input_image_size(net,
                            input_size):
    """
    Resolve the spatial input image size for a model. The model's own
    `in_size` attribute (after unwrapping a DataParallel-style `module`
    wrapper) takes precedence over the preliminary value.

    Parameters:
    ----------
    net : Module
        Model.
    input_size : int or tuple of 2 ints or None
        Preliminary value for input image size.

    Returns:
    -------
    tuple of 2 ints
        Spatial size of the expected input image (`input_size` is returned
        unchanged when the model declares no `in_size` and `input_size` is
        not an int).
    """
    # Unwrap DataParallel-style containers that hold the real model in `.module`.
    real_net = net.module if hasattr(net, "module") else net
    if hasattr(real_net, "in_size"):
        return real_net.in_size
    # `isinstance` instead of `type(...) == int` also accepts int subclasses.
    if isinstance(input_size, int):
        return (input_size, input_size)
    return input_size
def calc_model_accuracy(net,
                        test_data,
                        metric,
                        use_cuda,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False,
                        ml_type="cls"):
    """
    Estimating particular model accuracy (and, optionally, parameter count
    and FLOPs).

    Parameters:
    ----------
    net : Module
        Model.
    test_data : DataLoader
        Data loader.
    metric : EvalMetric
        Metric object instance.
    use_cuda : bool
        Whether to use CUDA.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    ml_type : str, default 'cls'
        Machine learning type.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs are computed).
    """
    if not calc_flops_only:
        # Run the actual evaluation pass and report timing + accuracy.
        tic = time.time()
        validate(
            metric=metric,
            net=net,
            val_data=test_data,
            use_cuda=use_cuda)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        # metric.get() returns (names, values); normalize values to a list.
        acc_values = metric.get()[1]
        acc_values = acc_values if type(acc_values) == list else [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        # When FLOPs are computed below, the count is logged there instead.
        if not calc_flops:
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        # ASR models take a flat feature vector + length; vision models take
        # a single NCHW image tensor.
        in_shapes = [(1, 640 * 25 * 5), (1,)] if ml_type == "asr" else\
            [(1, in_channels, input_image_size[0], input_image_size[1])]
        num_flops, num_macs, num_params = measure_model(
            model=net,
            in_shapes=in_shapes)
        # Sanity check: the two independent parameter counts must agree.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine: build data source, metric and model, then evaluate.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.

    Returns:
    -------
    float or None
        Main accuracy value (None when only FLOPs were computed).
    """
    ds_metainfo = prepare_dataset_metainfo(args=args)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    data_source = prepare_data_source(
        ds_metainfo=ds_metainfo,
        data_subset=args.data_subset,
        batch_size=batch_size,
        num_workers=args.num_workers)
    metric = prepare_metric(
        ds_metainfo=ds_metainfo,
        data_subset=args.data_subset)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        # Human pose estimation models take no class count.
        num_classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        remove_module=args.remove_module)
    input_image_size = update_input_image_size(
        net=net,
        input_size=(args.input_size if hasattr(args, "input_size") else None))
    if args.show_progress:
        from tqdm import tqdm
        data_source = tqdm(data_source)
    # Must have weights to evaluate, unless only FLOPs are requested.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=data_source,
        metric=metric,
        use_cuda=use_cuda,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        ml_type=ds_metainfo.ml_type)
    # saver_acc_ind selects which of the composite metric values is "main".
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: either evaluate a single model, or (with --all)
    sweep every registered pretrained ImageNet-1K model and flag accuracy
    values that drift from the recorded expectation.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        # Maps the short dataset tags stored in the model registry to the
        # dataset names understood by get_dataset_metainfo.
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
            "ls": "LibriSpeech",
            "mcv": "MCV",
        }
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
            # Only ImageNet-1K models with a declared input size are swept;
            # a remark ending in "*" marks a model as excluded.
            if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and (rem[-1] == "*")):
                continue
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.input_size = img_size
            args.resize_inv_factor = scale
            args.batch_size = batch
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # The registry stores the expected error in 1e-4 units.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)
if __name__ == "__main__":
    main()
| 13,989 | 29.347072 | 120 | py |
imgclsmob | imgclsmob-master/eval_gl_det.py | """
Script for evaluating trained model on MXNet/Gluon (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model
from gluon.utils import calc_net_weight_count, validate
from gluon.utils import get_composite_metric
from gluon.utils import report_accuracy
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_batch_fn
from gluon.dataset_utils import get_val_data_source, get_test_data_source
from gluon.model_stats import measure_model
from gluon.gluoncv2.models.model_store import _model_sha1
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="base data type for tensors")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters")
    parser.add_argument(
        "--calc-flops",
        dest="calc_flops",
        action="store_true",
        help="calculate FLOPs")
    parser.add_argument(
        "--calc-flops-only",
        dest="calc_flops_only",
        action="store_true",
        help="calculate FLOPs without quality estimation")
    parser.add_argument(
        "--data-subset",
        type=str,
        default="val",
        help="data subset. options are val and test")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        # Fixed: the separator was a stray Cyrillic "б" instead of ", ".
        default="mxnet-cu102, mxnet-cu110",
        help="list of pip packages for logging")
    parser.add_argument(
        "--disable-cudnn-autotune",
        action="store_true",
        help="disable cudnn autotune for segmentation models")
    parser.add_argument(
        "--show-progress",
        action="store_true",
        help="show progress bar")
    parser.add_argument(
        "--all",
        action="store_true",
        # Fixed typo: "partucular" -> "particular".
        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Create python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # First pass picks the dataset so its specific options can be registered.
    known_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=known_args.work_dir)
    add_eval_parser_arguments(arg_parser)
    # Second pass parses the full option set.
    return arg_parser.parse_args()
def calc_model_accuracy(net,
                        test_data,
                        batch_fn,
                        data_source_needs_reset,
                        metric,
                        dtype,
                        ctx,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False):
    """
    Main test routine: estimate model accuracy and, optionally, parameter
    count and FLOPs.

    Parameters:
    ----------
    net : HybridBlock
        Model.
    test_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    metric : EvalMetric
        Metric object instance.
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs are computed).
    """
    if not calc_flops_only:
        # Run the evaluation pass and report timing + accuracy.
        tic = time.time()
        validate(
            metric=metric,
            net=net,
            val_data=test_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        # metric.get() returns (names, values); normalize values to a list.
        acc_values = metric.get()[1]
        acc_values = acc_values if type(acc_values) == list else [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        # When FLOPs are computed below, the count is logged there instead.
        if not calc_flops:
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size, ctx[0])
        # Sanity check: the two independent parameter counts must agree.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine: build context, model, data source and metric, then
    evaluate.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.

    Returns:
    -------
    float or None
        Main accuracy value (None when only FLOPs were computed).
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Image segmentation requires batch size 1 and disabled cudnn autotune.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        # Human pose estimation models take no class count.
        classes=(args.classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        # Hybridization is incompatible with the FLOPs measurement hooks.
        do_hybridize=(ds_metainfo.allow_hybridize and (not args.calc_flops)),
        ctx=ctx)
    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size
    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    # Pick the metric set matching the evaluated subset.
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    if args.show_progress:
        from tqdm import tqdm
        test_data = tqdm(test_data)
    # Must have weights to evaluate, unless only FLOPs are requested.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        metric=test_metric,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
    # saver_acc_ind selects which of the composite metric values is "main".
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: either evaluate a single model, or (with --all)
    sweep every registered pretrained model and flag accuracy values that
    drift from the recorded expectation.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # The registry stores the expected error in 1e-4 units.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)
if __name__ == "__main__":
    main()
| 11,409 | 31.140845 | 117 | py |
imgclsmob | imgclsmob-master/train_ke.py | """
Script for training model on Keras.
"""
import argparse
import time
import logging
import os
import numpy as np
import random
import keras
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import mxnet as mx
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a model for image classification (Keras)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = arg_parser.add_argument
    # ImageNet record-file locations.
    add_arg("--rec-train", type=str,
            default="../imgclsmob_data/imagenet_rec/train.rec",
            help="the training data")
    add_arg("--rec-train-idx", type=str,
            default="../imgclsmob_data/imagenet_rec/train.idx",
            help="the index of training data")
    add_arg("--rec-val", type=str,
            default="../imgclsmob_data/imagenet_rec/val.rec",
            help="the validation data")
    add_arg("--rec-val-idx", type=str,
            default="../imgclsmob_data/imagenet_rec/val.idx",
            help="the index of validation data")
    # Model selection and checkpoint handling.
    add_arg("--model", type=str, required=True,
            help="type of model to use. see model_provider for options")
    add_arg("--use-pretrained", action="store_true",
            help="enable using pretrained model from github repo")
    add_arg("--dtype", type=str, default="float32",
            help="data type for training")
    add_arg("--resume", type=str, default="",
            help="resume from previously saved parameters if not None")
    add_arg("--resume-state", type=str, default="",
            help="resume from previously saved optimizer state if not None")
    # Input geometry.
    add_arg("--input-size", type=int, default=224,
            help="size of the input for model")
    add_arg("--resize-inv-factor", type=float, default=0.875,
            help="inverted ratio for input image crop")
    # Hardware and data loading.
    add_arg("--num-gpus", type=int, default=0,
            help="number of gpus to use")
    add_arg("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
            help="number of preprocessing workers")
    add_arg("--batch-size", type=int, default=512,
            help="training batch size per device (CPU/GPU)")
    # Training schedule and optimizer.
    add_arg("--num-epochs", type=int, default=120,
            help="number of training epochs")
    add_arg("--start-epoch", type=int, default=1,
            help="starting epoch for resuming, default is 1 for new training")
    add_arg("--attempt", type=int, default=1,
            help="current number of training")
    add_arg("--optimizer-name", type=str, default="nag",
            help="optimizer name")
    add_arg("--lr", type=float, default=0.1,
            help="learning rate")
    add_arg("--momentum", type=float, default=0.9,
            help="momentum value for optimizer")
    add_arg("--wd", type=float, default=0.0001,
            help="weight decay rate")
    # Logging and checkpoint cadence.
    add_arg("--log-interval", type=int, default=50,
            help="number of batches to wait before logging")
    add_arg("--save-interval", type=int, default=4,
            help="saving parameters epoch interval, best model will always be saved")
    add_arg("--save-dir", type=str, default="",
            help="directory of saved models and log-files")
    add_arg("--logging-file-name", type=str, default="train.log",
            help="filename of training log")
    add_arg("--seed", type=int, default=-1,
            help="Random seed to be fixed")
    add_arg("--log-packages", type=str, default="keras",
            help="list of python packages for logging")
    add_arg("--log-pip-packages", type=str,
            default="keras, keras-mxnet, keras-applications, keras-preprocessing",
            help="list of pip packages for logging")
    return arg_parser.parse_args()
def init_rand(seed):
    """
    Seed the `random`, NumPy and MXNet RNGs; draw a fresh seed when
    non-positive.

    Parameters:
    ----------
    seed : int
        Requested seed; values <= 0 mean "pick one at random".

    Returns:
    -------
    int
        The seed that was actually applied.
    """
    chosen_seed = np.random.randint(10000) if seed <= 0 else seed
    random.seed(chosen_seed)
    np.random.seed(chosen_seed)
    mx.random.seed(chosen_seed)
    return chosen_seed
def prepare_trainer(net,
                    optimizer_name,
                    momentum,
                    lr,
                    num_gpus,
                    state_file_path=None):
    """
    Compile the model with the requested SGD/NAG optimizer and optionally
    restore a previously saved training state.

    Parameters:
    ----------
    net : keras.Model
        Model to compile.
    optimizer_name : str
        Optimizer name; only 'sgd' and 'nag' are supported.
    momentum : float
        Momentum value for the optimizer.
    lr : float
        Learning rate.
    num_gpus : int
        Number of GPUs (forwarded to backend_agnostic_compile).
    state_file_path : str or None, default None
        Path to a saved model/optimizer state to resume from.

    Returns:
    -------
    keras.Model
        Compiled (or restored) model.

    Raises:
    ------
    ValueError
        If the optimizer name is not supported.
    """
    optimizer_name = optimizer_name.lower()
    if (optimizer_name == "sgd") or (optimizer_name == "nag"):
        # NAG is SGD with Nesterov momentum enabled.
        optimizer = keras.optimizers.SGD(
            lr=lr,
            momentum=momentum,
            nesterov=(optimizer_name == "nag"))
    else:
        # Fixed typo in the error message ("Usupported").
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))
    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=optimizer,
        metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)
    # Restoring replaces the freshly compiled model entirely.
    # (A non-empty string is never None, so the previous `is not None` check
    # was redundant.)
    if state_file_path and os.path.exists(state_file_path):
        net = load_model(filepath=state_file_path)
    return net
def train_net(net,
              train_gen,
              val_gen,
              train_num_examples,
              val_num_examples,
              num_epochs,
              checkpoint_filepath,
              start_epoch1):
    """
    Train a compiled Keras model with generator-based data feeding.

    Parameters:
    ----------
    net : keras.Model
        Compiled model to train.
    train_gen : generator
        Training batch generator.
    val_gen : generator
        Validation batch generator.
    train_num_examples : int
        Number of training samples (passed as `samples_per_epoch`).
    val_num_examples : int
        Number of validation samples (passed as `validation_steps`).
    num_epochs : int
        Number of training epochs.
    checkpoint_filepath : str
        File path used by ModelCheckpoint for the best model.
    start_epoch1 : int
        1-based starting epoch (for resuming).
    """
    # Keep only the best model seen so far on disk.
    checkpointer = ModelCheckpoint(
        filepath=checkpoint_filepath,
        verbose=1,
        save_best_only=True)
    tic = time.time()
    # NOTE(review): `samples_per_epoch` is a Keras-1-era keyword (Keras 2
    # renamed it to `steps_per_epoch`, measured in batches); presumably the
    # keras-mxnet build used here still accepts it -- confirm. Likewise
    # `validation_steps` receives a raw example count rather than a batch
    # count -- verify against the generator contract.
    net.fit_generator(
        generator=train_gen,
        samples_per_epoch=train_num_examples,
        epochs=num_epochs,
        verbose=True,
        callbacks=[checkpointer],
        validation_data=val_gen,
        validation_steps=val_num_examples,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=(start_epoch1 - 1))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
def main():
    """
    Script entry point: set up logging and data pipelines, build the model
    and trainer, then run training on ImageNet-1K.
    """
    cli_args = parse_args()
    cli_args.seed = init_rand(seed=cli_args.seed)

    initialize_logging(
        logging_dir_path=cli_args.save_dir,
        logging_file_name=cli_args.logging_file_name,
        script_args=cli_args,
        log_packages=cli_args.log_packages,
        log_pip_packages=cli_args.log_pip_packages)

    # The context may rescale the batch size across devices.
    effective_batch_size = prepare_ke_context(
        num_gpus=cli_args.num_gpus,
        batch_size=cli_args.batch_size)

    model = prepare_model(
        model_name=cli_args.model,
        use_pretrained=cli_args.use_pretrained,
        pretrained_model_file_path=cli_args.resume.strip())
    class_count = model.classes if hasattr(model, "classes") else 1000
    image_size = model.in_size if hasattr(model, "in_size") else (cli_args.input_size, cli_args.input_size)

    train_iter, val_iter = get_data_rec(
        rec_train=cli_args.rec_train,
        rec_train_idx=cli_args.rec_train_idx,
        rec_val=cli_args.rec_val,
        rec_val_idx=cli_args.rec_val_idx,
        batch_size=effective_batch_size,
        num_workers=cli_args.num_workers,
        input_image_size=image_size,
        resize_inv_factor=cli_args.resize_inv_factor)
    train_generator = get_data_generator(
        data_iterator=train_iter,
        num_classes=class_count)
    val_generator = get_data_generator(
        data_iterator=val_iter,
        num_classes=class_count)

    model = prepare_trainer(
        net=model,
        optimizer_name=cli_args.optimizer_name,
        momentum=cli_args.momentum,
        lr=cli_args.lr,
        num_gpus=cli_args.num_gpus,
        state_file_path=cli_args.resume_state)

    # 1281167 / 50048 are the ImageNet-1K train/val record sample counts.
    train_net(
        net=model,
        train_gen=train_generator,
        val_gen=val_generator,
        train_num_examples=1281167,
        val_num_examples=50048,
        num_epochs=cli_args.num_epochs,
        checkpoint_filepath=os.path.join(cli_args.save_dir, "imagenet_{}.h5".format(cli_args.model)),
        start_epoch1=cli_args.start_epoch)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == "__main__":
    main()
| 8,801 | 26.85443 | 118 | py |
imgclsmob | imgclsmob-master/eval_tf2.py | """
Script for evaluating trained model on TensorFlow 2.0 (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
import tensorflow as tf
from common.logger_utils import initialize_logging
from tensorflow2.utils import prepare_model
from tensorflow2.tf2cv.models.model_store import _model_sha1
from tensorflow2.dataset_utils import get_dataset_metainfo, get_val_data_source, get_test_data_source
from tensorflow2.utils import get_composite_metric
from tensorflow2.utils import report_accuracy
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance to extend in place.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters")
    parser.add_argument(
        "--calc-flops-only",
        dest="calc_flops_only",
        action="store_true",
        help="calculate FLOPs without quality estimation")
    parser.add_argument(
        "--data-subset",
        type=str,
        default="val",
        help="data subset. options are val and test")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="tensorflow, tensorflow-gpu",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="tensorflow, tensorflow-gpu",
        help="list of pip packages for logging")
    parser.add_argument(
        "--disable-cudnn-autotune",
        action="store_true",
        help="disable cudnn autotune for segmentation models")
    parser.add_argument(
        "--show-progress",
        action="store_true",
        help="show progress bar")
    # Fixed typo in help text: "partucular" -> "particular".
    parser.add_argument(
        "--all",
        action="store_true",
        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Create python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (TensorFlow 2.0)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")

    # First pass: only --dataset/--work-dir are needed to pick the
    # dataset-specific arguments before the full parse.
    known_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=known_args.work_dir)

    add_eval_parser_arguments(arg_parser)
    return arg_parser.parse_args()
def test_model(args,
               use_cuda,
               data_format):
    """
    Main test routine.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    use_cuda : bool
        Whether to use CUDA.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    list of float
        Accuracy metric values (empty when only FLOPs are calculated).
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation models must run one image at a time with autotune disabled.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        batch_size=batch_size,
        use_cuda=use_cuda)
    assert (hasattr(net, "in_size"))
    if not args.calc_flops_only:
        tic = time.time()
        # Pick data source and metric set by the requested subset (val vs test).
        get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
        test_data, total_img_count = get_test_data_source_class(
            ds_metainfo=ds_metainfo,
            batch_size=args.batch_size,
            data_format=data_format)
        if args.data_subset == "val":
            test_metric = get_composite_metric(
                metric_names=ds_metainfo.val_metric_names,
                metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
        else:
            test_metric = get_composite_metric(
                metric_names=ds_metainfo.test_metric_names,
                metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
        if args.show_progress:
            from tqdm import tqdm
            test_data = tqdm(test_data)
        processed_img_count = 0
        for test_images, test_labels in test_data:
            predictions = net(test_images)
            test_metric.update(test_labels, predictions)
            processed_img_count += len(test_images)
            # The data source may cycle; stop once the whole subset is consumed.
            if processed_img_count >= total_img_count:
                break
        accuracy_msg = report_accuracy(
            metric=test_metric,
            extended_log=True)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = test_metric.get()[1]
        # Normalize to a list: a composite metric may return a scalar or a list.
        acc_values = acc_values if type(acc_values) == list else [acc_values]
    else:
        acc_values = []
    return acc_values
def main():
    """
    Main body of script: evaluate one model, or sweep all pretrained models
    from the model store when --all is given.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        os.environ["TF_CUDNN_USE_AUTOTUNE"] = "0"
        # os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
        # os.environ["TF_DETERMINISTIC_OPS"] = "1"
    # Avoid TF grabbing all GPU memory up front.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)
    use_cuda = (args.num_gpus > 0)
    if args.all:
        args.use_pretrained = True
        # Maps model-store dataset tags to full dataset names used by args.
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
            "ls": "LibriSpeech",
            "mcv": "MCV",
        }
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag, ds, scale = model_metainfo
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.resize_inv_factor = scale
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(
                args=args,
                use_cuda=use_cuda,
                data_format=data_format)
            if acc_value is not None:
                # Stored `error` is in units of 1e-4.
                exp_value = int(error) * 1e-4
                # NOTE(review): test_model appears to return a list here, which
                # would make this arithmetic fail — confirm its actual return type.
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
            # Free graph/session state between models to bound memory use.
            tf.keras.backend.clear_session()
    else:
        test_model(
            args=args,
            use_cuda=use_cuda,
            data_format=data_format)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == "__main__":
    main()
| 9,076 | 29.979522 | 117 | py |
imgclsmob | imgclsmob-master/prep_model.py | """
Script for preparing the model for publication.
"""
import os
import argparse
import subprocess
import shutil
import re
import hashlib
import zipfile
import pandas as pd
def parse_args():
    """
    Parse command line parameters for the model-preparation script.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    cli = argparse.ArgumentParser(
        description="Prepare model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument(
        "--model",
        type=str,
        required=True,
        help="model name")
    cli.add_argument(
        "--resume",
        type=str,
        default="",
        help="model weights (Gluon) file path")
    cli.add_argument(
        "--input-size",
        type=int,
        default=224,
        help="size of the input for model")
    return cli.parse_args()
def calc_sha1(file_name):
    """
    Calculate sha1 hash of the file content.

    Parameters:
    ----------
    file_name : str
        Path to the file.

    Returns:
    -------
    str
        sha1 hex digest.
    """
    # (Docstring previously documented a nonexistent `sha1_hash` parameter.)
    sha1 = hashlib.sha1()
    with open(file_name, "rb") as f:
        # Read in 1 MiB chunks to keep memory bounded for large weight files.
        for chunk in iter(lambda: f.read(1048576), b""):
            sha1.update(chunk)
    return sha1.hexdigest()
def post_process(dst_dir_path,
                 model_name,
                 model_file_path,
                 log_file_path,
                 dst_model_file_ext,
                 log_line_num):
    """
    Post-process weight/log files.

    Parameters:
    ----------
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path.
    log_file_path : str
        Log file path.
    dst_model_file_ext : str
        Destination model file extension.
    log_line_num : int
        Log file last line number for analysis.

    Returns:
    -------
    top5_err : str
        top5 error value.
    sha1_value : str
        sha1 hex digest.
    """
    with open(log_file_path, "r") as f:
        log_file_tail = f.read().splitlines()[log_line_num]
    err5_str = re.findall(r", err-top5=\d+\.\d+", log_file_tail)
    if len(err5_str) != 0:
        # Keep only the fractional digits of the top-5 error for the file name.
        top5_err = re.findall(r"\d+\.\d+", err5_str[0])[0].split(".")[1]
    else:
        # Fallback: the metric may be one line earlier in the log.
        with open(log_file_path, "r") as f:
            log_file_tail = f.read().splitlines()[log_line_num - 1]
        err5_str = re.findall(r", err-top5=\d+\.\d+", log_file_tail)
        top5_err = re.findall(r"\d+\.\d+", err5_str[0])[0].split(".")[1]
    sha1_value = calc_sha1(model_file_path)
    # Naming scheme: <model>-<top5>-<sha1[:8]>.<ext>; the log gets ".log" appended.
    dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_value[:8], dst_model_file_ext)
    dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
    os.rename(model_file_path, dst_model_file_path)
    os.rename(log_file_path, dst_model_file_path + ".log")
    # Ship the weights zipped; remove the uncompressed copy afterwards.
    with zipfile.ZipFile(dst_model_file_path + ".zip", "w", zipfile.ZIP_DEFLATED) as zf:
        zf.write(filename=dst_model_file_path, arcname=dst_model_file_name)
    os.remove(dst_model_file_path)
    return top5_err, sha1_value
def process_fwk(prep_info_dict,
                dst_framework,
                dst_dir_path,
                model_name,
                model_file_path,
                log_file_path,
                input_size):
    """
    Process weights on specific framework.

    Parameters:
    ----------
    prep_info_dict : dict
        Dictionary with preparation meta-info.
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path.
    log_file_path : str
        Log file path.
    dst_framework : str
        Destination framework.
    input_size : int
        Size of the input for model.
    """
    # Per-framework settings: weight extension, eval script, and the log line
    # (counted from the end) that carries the accuracy metric.
    if dst_framework == "gluon":
        dst_model_file_ext = "params"
        eval_script = "eval_gl"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "pytorch":
        dst_model_file_ext = "pth"
        eval_script = "eval_pt"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "chainer":
        dst_model_file_ext = "npz"
        eval_script = "eval_ch"
        num_gpus = 1
        calc_flops = ""
        log_line_num = -2
    elif dst_framework == "tf2":
        dst_model_file_ext = "tf2.h5"
        eval_script = "eval_tf2"
        num_gpus = 1
        calc_flops = ""
        log_line_num = -2
    else:
        raise ValueError("Unknown framework: {}".format(dst_framework))
    # A processed ".<ext>.log" file means this framework was already handled;
    # in that case only re-verify the sha1 of the archived weights.
    post_proc_log_files = [f for f in os.listdir(dst_dir_path) if f.endswith(".{}.log".format(dst_model_file_ext))]
    assert (len(post_proc_log_files) in [0, 1])
    if len(post_proc_log_files) == 0:
        dst_raw_log_file_path = os.path.join(dst_dir_path, "train.log")
        shutil.copy2(log_file_path, dst_raw_log_file_path)
        dst_raw_model_file_path = os.path.join(dst_dir_path, "{}.{}".format(model_name, dst_model_file_ext))
        if dst_framework == "gluon":
            # Source weights are already Gluon; just copy them.
            shutil.copy2(model_file_path, dst_raw_model_file_path)
        else:
            # Convert the Gluon weights into the destination framework.
            command = "python3 convert_models.py --src-fwk=gluon --dst-fwk={dst_framework} --src-model={model_name}" \
                      " --dst-model={model_name} --src-params={model_file_path}" \
                      " --dst-params={dst_raw_model_file_path} --save-dir={dst_dir_path}"
            subprocess.call([command.format(
                dst_framework=dst_framework,
                model_name=model_name,
                model_file_path=model_file_path,
                dst_raw_model_file_path=dst_raw_model_file_path,
                dst_dir_path=dst_dir_path)], shell=True)
        # Evaluate the converted weights; the eval script appends metrics to the log.
        command = "python3 {eval_script}.py --model={model_name} --resume={dst_raw_model_file_path}" \
                  " --save-dir={dst_dir_path} --num-gpus={num_gpus} --batch-size=100 -j=4 --input-size={input_size} " \
                  "{calc_flops}"
        subprocess.call([command.format(
            eval_script=eval_script,
            model_name=model_name,
            dst_raw_model_file_path=dst_raw_model_file_path,
            dst_dir_path=dst_dir_path,
            num_gpus=num_gpus,
            input_size=input_size,
            calc_flops=calc_flops)], shell=True)
        if dst_framework == "gluon":
            # Keep the original train.log in sync with the evaluated copy.
            shutil.copy2(dst_raw_log_file_path, log_file_path)
        top5_err, sha1_value = post_process(
            dst_dir_path=dst_dir_path,
            model_name=model_name,
            model_file_path=dst_raw_model_file_path,
            log_file_path=dst_raw_log_file_path,
            dst_model_file_ext=dst_model_file_ext,
            log_line_num=log_line_num)
    else:
        # Already processed: recover top5/sha1 from the archived artifacts.
        model_name1, top5_err, sha1_short = post_proc_log_files[0].split(".")[0].split("-")
        assert (model_name1 == model_name)
        dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_short, dst_model_file_ext)
        dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
        dst_zip_model_file_path = dst_model_file_path + ".zip"
        assert os.path.exists(dst_zip_model_file_path)
        with zipfile.ZipFile(dst_zip_model_file_path, "r") as zf:
            zf.extract(dst_model_file_name, dst_dir_path)
        sha1_value = calc_sha1(dst_model_file_path)
        os.remove(dst_model_file_path)
    prep_info_dict["Type"].append(dst_framework)
    prep_info_dict["Top5"].append(top5_err)
    prep_info_dict["Sha1"].append(sha1_value)
def main():
    """
    Entry point: validate the input weight/log paths, convert the weights for
    every target framework, and write a tab-separated summary table.
    """
    cli_args = parse_args()
    weights_path = os.path.expanduser(cli_args.resume)
    if not os.path.exists(weights_path):
        raise Exception("Model file doesn't exist: {}".format(weights_path))
    work_dir = os.path.dirname(weights_path)

    train_log_path = os.path.join(work_dir, "train.log")
    if not os.path.exists(train_log_path):
        raise Exception("Log file doesn't exist: {}".format(train_log_path))

    result_dir = os.path.join(work_dir, "_result")
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # Accumulates one row per framework: (Type, Top5, Sha1).
    summary = {
        "Type": [],
        "Top5": [],
        "Sha1": [],
    }
    for framework in ["gluon", "pytorch", "chainer", "tf2"]:
        process_fwk(
            prep_info_dict=summary,
            dst_framework=framework,
            dst_dir_path=result_dir,
            model_name=cli_args.model,
            model_file_path=weights_path,
            log_file_path=train_log_path,
            input_size=cli_args.input_size)

    pd.DataFrame(summary).to_csv(
        os.path.join(work_dir, "prep_info.csv"),
        sep="\t",
        index=False)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main()
| 9,068 | 30.380623 | 119 | py |
imgclsmob | imgclsmob-master/convert_models.py | """
Script for converting models between frameworks (MXNet, Gluon, PyTroch, Chainer, Keras, TensorFlow).
"""
import argparse
import logging
import re
import numpy as np
from common.logger_utils import initialize_logging
def parse_args():
    """
    Parse command line parameters for the weight-conversion script.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Convert models (Gluon/PyTorch/Chainer/MXNet/Keras/TF/TF2)",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required model/framework selectors.
    parser.add_argument(
        "--src-fwk",
        type=str,
        required=True,
        help="source model framework name")
    parser.add_argument(
        "--dst-fwk",
        type=str,
        required=True,
        help="destination model framework name")
    parser.add_argument(
        "--src-model",
        type=str,
        required=True,
        help="source model name")
    parser.add_argument(
        "--dst-model",
        type=str,
        required=True,
        help="destination model name")
    # Optional parameter-file locations.
    parser.add_argument(
        "--src-params",
        type=str,
        default="",
        help="source model parameter file path")
    parser.add_argument(
        "--dst-params",
        type=str,
        default="",
        help="destination model parameter file path")
    # Loading tweaks (fixed typo in help text: "PyTroch" -> "PyTorch").
    parser.add_argument(
        "--load-ignore-extra",
        action="store_true",
        help="ignore extra layers in the source PyTorch model")
    parser.add_argument(
        "--remove-module",
        action="store_true",
        help="enable if stored PyTorch model has module")
    # Model shape overrides.
    parser.add_argument(
        "--src-num-classes",
        type=int,
        default=1000,
        help="number of classes for source model")
    parser.add_argument(
        "--src-in-channels",
        type=int,
        default=3,
        help="number of input channels for source model")
    parser.add_argument(
        "--dst-num-classes",
        type=int,
        default=1000,
        help="number of classes for destination model")
    parser.add_argument(
        "--dst-in-channels",
        type=int,
        default=3,
        help="number of input channels for destination model")
    parser.add_argument(
        "--model-type",
        type=str,
        default="image",
        help="model type (image or audio)")
    # Logging.
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    args = parser.parse_args()
    return args
def prepare_src_model(src_fwk,
                      src_model,
                      src_params_file_path,
                      dst_fwk,
                      ctx,
                      use_cuda,
                      load_ignore_extra=False,
                      remove_module=False,
                      num_classes=None,
                      in_channels=None):
    """
    Load the source model and collect its parameters for conversion.

    Parameters:
    ----------
    src_fwk : str
        Source framework name ('gluon', 'pytorch', 'mxnet', 'tensorflow', 'tf2').
    src_model : str
        Source model name.
    src_params_file_path : str
        Source parameter file path.
    dst_fwk : str
        Destination framework name (affects key filtering).
    ctx : Context
        MXNet context (Gluon only).
    use_cuda : bool
        Whether to use CUDA (PyTorch only).
    load_ignore_extra : bool, default False
        Ignore extra layers in the source PyTorch model.
    remove_module : bool, default False
        Strip 'module' wrapper from a stored PyTorch model.
    num_classes : int or None, default None
        Number of classes; non-positive values fall back to the model default.
    in_channels : int or None, default None
        Number of input channels.

    Returns:
    -------
    tuple
        (src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net).

    Raises:
    ------
    ValueError
        If the source framework is not supported.
    """
    ext_src_param_keys = None
    ext_src_param_keys2 = None
    src_net = None
    if src_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        src_net = prepare_model_gl(
            model_name=src_model,
            use_pretrained=False,
            pretrained_model_file_path=src_params_file_path,
            dtype=np.float32,
            tune_layers="",
            classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        src_params = src_net._collect_params_with_prefix()
        src_param_keys = list(src_params.keys())
        # Drop feature-extractor biases for these converted third-party resnets.
        if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
                         "oth_resnet101_v1b", "oth_resnet152_v1b"]:
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and key.endswith(".bias"))]
        # NOTE(review): this block is a verbatim duplicate of the one above
        # (harmless, since the filter is idempotent) — likely a copy-paste slip.
        if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
                         "oth_resnet101_v1b", "oth_resnet152_v1b"]:
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and key.endswith(".bias"))]
        if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and
                                   (key.endswith(".bn.gamma") or key.endswith(".bn.beta")))]
        if dst_fwk == "chainer":
            # Chainer stores BN running statistics separately; split them out.
            src_param_keys_ = src_param_keys.copy()
            src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".running_mean")) and
                              (not key.endswith(".running_var"))]
            ext_src_param_keys = [key for key in src_param_keys_ if (key.endswith(".running_mean")) or
                                  (key.endswith(".running_var"))]
            # Model-specific auxiliary buffers handled out-of-band.
            if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".index"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".index"))]
            elif src_model.startswith("xdensenet"):
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".mask"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".mask"))]
            elif src_model.startswith("jasper") or src_model.startswith("quartznet"):
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".window")) and
                                  (not key.endswith(".fb"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".window")) or
                                       (key.endswith(".fb"))]
    elif src_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        src_net = prepare_model_pt(
            model_name=src_model,
            use_pretrained=False,
            pretrained_model_file_path=src_params_file_path,
            use_cuda=use_cuda,
            use_data_parallel=False,
            load_ignore_extra=load_ignore_extra,
            num_classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            remove_module=remove_module)
        src_params = src_net.state_dict()
        src_param_keys = list(src_params.keys())
        if dst_fwk != "pytorch":
            # BN bookkeeping counters only exist in PyTorch.
            src_param_keys = [key for key in src_param_keys if not key.endswith("num_batches_tracked")]
        if src_model in ["oth_shufflenetv2_wd2"]:
            src_param_keys = [key for key in src_param_keys if not key.startswith("network.0.")]
        if src_model.startswith("oth_dla"):
            # Reorder DLA ".project" keys: deepest match per level first.
            src1 = list(filter(re.compile("\.project").search, src_param_keys))
            src1n = [key for key in src_param_keys if key not in src1]
            src2 = []
            for i in range(2, 6):
                src1_i = list(filter(re.compile("level{}".format(i)).search, src1))
                if len(src1_i) == 0:
                    continue
                max_len = max([len(k) for k in src1_i])
                pattern_i = [k for k in src1_i if len(k) == max_len][0][:-21]
                src2_i = list(filter(re.compile(pattern_i).search, src1))
                src2 += src2_i
            src_param_keys = src2 + src1n
    elif src_fwk == "mxnet":
        import mxnet as mx
        src_sym, src_arg_params, src_aux_params = mx.model.load_checkpoint(
            prefix=src_params_file_path,
            epoch=0)
        src_params = {}
        src_params.update(src_arg_params)
        src_params.update(src_aux_params)
        src_param_keys = list(src_params.keys())
    elif src_fwk == "tensorflow":
        # import tensorflow as tf
        # from tensorflow_.utils import prepare_model as prepare_model_tf
        # src_net = prepare_model_tf(
        #     model_name=src_model,
        #     classes=num_classes,
        #     use_pretrained=False,
        #     pretrained_model_file_path=src_params_file_path)
        # src_param_keys = [v.name for v in tf.global_variables()]
        # src_params = {v.name: v for v in tf.global_variables()}
        src_net = None
        src_params = dict(np.load(src_params_file_path))
        src_param_keys = list(src_params.keys())
    elif (src_fwk == "tf2") and (dst_fwk == "tfl"):
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        src_net = prepare_model_tf2(
            model_name=src_model,
            use_pretrained=True,
            pretrained_model_file_path="")
        # Warm-up forward pass so variables are created before TFLite export.
        batch_size = 1
        input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if
                       src_net.data_format == "channels_first" else
                       (batch_size, src_net.in_size[0], src_net.in_size[1], 3))
        src_net(tf.random.normal(input_shape))
        src_params = None
        src_param_keys = None
    else:
        raise ValueError("Unsupported src fwk: {}".format(src_fwk))
    return src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net
def prepare_dst_model(dst_fwk,
                      dst_model,
                      src_fwk,
                      ctx,
                      use_cuda,
                      num_classes=None,
                      in_channels=None,
                      model_type="image"):
    """
    Instantiate the destination model and collect its parameter dictionary.

    Parameters:
    ----------
    dst_fwk : str
        Destination framework name ('gluon', 'pytorch', 'chainer', 'keras',
        'tensorflow', 'tf2' or 'tfl').
    dst_model : str
        Destination model name.
    src_fwk : str
        Source framework name (affects PyTorch key filtering).
    ctx : Context
        MXNet context (Gluon only).
    use_cuda : bool
        Whether to use CUDA (PyTorch only).
    num_classes : int or None, default None
        Number of classes; non-positive values fall back to the model default.
    in_channels : int or None, default None
        Number of input channels.
    model_type : str, default 'image'
        Model type ('image' or 'audio'); controls the TF2 warm-up input.

    Returns:
    -------
    dst_params : dict or None
        Mapping from parameter name to parameter object ('tfl' returns None).
    dst_param_keys : list of str or None
        Parameter names ('tfl' returns None).
    dst_net : obj or None
        Instantiated destination network ('tfl' returns None).

    Raises:
    ------
    ValueError
        If the destination framework is not supported.
    """
    if dst_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        dst_net = prepare_model_gl(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            dtype=np.float32,
            tune_layers="",
            classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        dst_params = dst_net._collect_params_with_prefix()
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        dst_net = prepare_model_pt(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            use_cuda=use_cuda,
            use_data_parallel=False,
            num_classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels)
        dst_params = dst_net.state_dict()
        dst_param_keys = list(dst_params.keys())
        if src_fwk != "pytorch":
            # BN bookkeeping counters only exist in PyTorch; skip them.
            dst_param_keys = [key for key in dst_param_keys if not key.endswith("num_batches_tracked")]
    elif dst_fwk == "chainer":
        from chainer_.utils import prepare_model as prepare_model_ch
        dst_net = prepare_model_ch(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_params = {i[0]: i[1] for i in dst_net.namedparams()}
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "keras":
        from keras_.utils import prepare_model as prepare_model_ke
        dst_net = prepare_model_ke(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in dst_net.weights]
        # Map each named weight to its (layer, weight) pair.
        # (Removed a dead `dst_params.setdefault(name, []).append(weight)` line
        # whose result was immediately overwritten by the assignment below and
        # which would crash on a duplicated weight name.)
        dst_params = {}
        for layer in dst_net.layers:
            if layer.name:
                for weight in layer.weights:
                    if weight.name:
                        dst_params[weight.name] = (layer, weight)
    elif dst_fwk == "tensorflow":
        import tensorflow as tf
        from tensorflow_.utils import prepare_model as prepare_model_tf
        dst_net = prepare_model_tf(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in tf.global_variables()]
        dst_params = {v.name: v for v in tf.global_variables()}
    elif dst_fwk == "tf2":
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        dst_net = prepare_model_tf2(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        # Warm-up forward pass so the variables get created.
        batch_size = 1
        if model_type == "image":
            input_shape = ((batch_size, 3, dst_net.in_size[0], dst_net.in_size[1]) if
                           dst_net.data_format == "channels_first" else
                           (batch_size, dst_net.in_size[0], dst_net.in_size[1], 3))
            dst_net(tf.random.normal(input_shape))
        else:
            # Audio models take a raw waveform plus its length.
            seq_len = 100 * 640
            input_shape = (batch_size, seq_len)
            # `np.long` was removed in NumPy 1.24; int64 matches its old meaning.
            x_len = tf.convert_to_tensor(np.array([seq_len], dtype=np.int64))
            dst_net(tf.random.normal(input_shape), x_len)
        dst_param_keys = [v.name for v in dst_net.weights]
        dst_params = {v.name: v for v in dst_net.weights}
    elif dst_fwk == "tfl":
        # TFLite export works directly from the source net; nothing to build here.
        dst_net = None
        dst_params = None
        dst_param_keys = None
    else:
        raise ValueError("Unsupported dst fwk: {}".format(dst_fwk))
    return dst_params, dst_param_keys, dst_net
def convert_mx2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  src_model,
                  ctx):
    """
    Convert model parameters from symbolic MXNet into a Gluon network and save them.

    Common scheme for every branch: rewrite the source key names into the
    destination's dotted layout, natural-sort both key lists so they align
    positionally, rewrite the names back, then copy the weights pairwise.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination Gluon network.
    dst_params_file_path : str
        Path to the output parameter file.
    dst_params : dict
        Destination parameters (name -> Gluon parameter supporting `_load_init`).
    dst_param_keys : list(str)
        Destination parameter names (sorted in place).
    src_params : dict
        Source parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names (renamed/sorted in place).
    src_model : str
        Source model name; selects the model-specific renaming scheme.
    ctx : mx.Context
        Context used when loading/initializing parameters.
    """
    if src_model in ["crunet56", "crunet116"]:
        # Natural sort: runs of digits compare numerically (padded to width 10),
        # so e.g. 'stage10' sorts after 'stage2'.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Rewrite raw CRU-Net symbol names into the destination's dotted layout
        # so that the natural sort below orders both key lists identically.
        src_param_keys = [re.sub("^conv", "features.", key) for key in src_param_keys]
        src_param_keys = [re.sub("^fc6", "output.1.", key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-a', '.body.conv1.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c3x3-b', '.body.conv2A.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-b', '.body.conv2B.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-c', '.body.conv3.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_x__x_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__x_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_1x1_bases\[dim3\]_weight$', '_x__7.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_3x3_bases\[dim21\]_weight$', '_x__7.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_1x1_bases\[dim3\]_weight$', '_x__14.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_3x3_bases\[dim21\]_weight$', '_x__14.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/2\)', '.input_convZ.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w_weight$', '.input_convZ.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/1\)', '.input_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/key\)', '.identity_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('__conv_weight$', '.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_beta$', '.bn.beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_gamma$', '.bn.gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_mean$', '.bn.running_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_var$', '.bn.running_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1__relu-sp__bn_', '1_x_1.conv.bnA.', key) for key in src_param_keys]
        # Re-sort both key lists with the same natural order so they align.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Undo the renaming: restore the original source symbol names
        # (the sorted ORDER is what was needed, not the new names).
        src_param_keys = [re.sub("^features\.", "conv", key) for key in src_param_keys]
        src_param_keys = [re.sub('^output\.1\.', 'fc6', key) for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convT\.weight$', '_x__x_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convT\.weight$', '_x__x_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convQ\.weight$', '_x__(1)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convQ\.weight$', '_x__(1)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv1\.convQ\.weight$', '_x__(2)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv2\.convQ\.weight$', '_x__(2)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv1\.convQ\.weight$', '_x__(3)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv2\.convQ\.weight$', '_x__(3)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv1\.', '_c1x1-a', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2A\.', '_c3x3-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2B\.', '_c1x1-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv3\.', '_c1x1-c', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.conv\.weight$', '_c1x1-w_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.', '_c1x1-w(s/2)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_conv\.', '_c1x1-w(s/1)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.identity_conv\.', '_c1x1-w(s/key)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.conv\.weight$', '__conv_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.beta$', '__bn__bn_beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.gamma$', '__bn__bn_gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_mean$', '__bn__bn_moving_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_var$', '__bn__bn_moving_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1\.conv\.bnA\.', '1_x_1__relu-sp__bn_', key) for key in src_param_keys]
        # The key lists are not strictly one-to-one here: the destination may
        # contain extra '*weight' entries.  For each source key, skip at most
        # 10 destination keys until both name suffix and shape agree.
        dst_i = 0
        for src_i, src_key in enumerate(src_param_keys):
            dst_key = dst_param_keys[dst_i]
            for tt in range(10):
                if (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]) and\
                        (dst_params[dst_key].shape == src_params[src_key].shape):
                    break
                # Only destination-only 'weight' entries may be skipped.
                assert (dst_key.split('.')[-1].split('_')[-1] == "weight")
                dst_i += 1
                dst_key = dst_param_keys[dst_i]
            dst_i += 1
            assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1])
            assert (dst_params[dst_key].shape == src_params[src_key].shape), \
                "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
            dst_params[dst_key]._load_init(src_params[src_key], ctx)
        # Initialize any destination parameters that never received data.
        for param in dst_net.collect_params().values():
            if param._data is not None:
                continue
            print("param={}".format(param))
            param.initialize(ctx=ctx)
        dst_net.save_parameters(dst_params_file_path)
        return
    elif src_model in ["igcv3_w1"]:
        # IGCV3: map '-'-separated source names onto the destination layout.
        # 'features.0'/'features.6' are aliased to 'features.A'/'features.B'
        # so the first/last blocks keep their positions in the natural sort.
        src_param_keys = [key.replace("seq-", "features.") for key in src_param_keys]
        src_param_keys = [key.replace("fc_", "output.1.") for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_beta', '.bn.beta') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_gamma', '.bn.gamma') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_mean', '.bn.running_mean') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_var', '.bn.running_var') for key in src_param_keys]
        src_param_keys = [key.replace('-conv2d_weight', '.conv.weight') for key in src_param_keys]
        src_param_keys = [key.replace('first-3x3-conv', 'features.A') for key in src_param_keys]
        src_param_keys = [key.replace('last-1x1-conv', 'features.B') for key in src_param_keys]
        src_param_keys = [key.replace('-exp', '.conv1') for key in src_param_keys]
        src_param_keys = [key.replace('-depthwise', '.conv2') for key in src_param_keys]
        src_param_keys = [key.replace('-linear', '.conv3') for key in src_param_keys]
        src_param_keys = [key.replace("-block", ".block") for key in src_param_keys]
        dst_param_keys = [key.replace('features.0.', 'features.A.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.6.', 'features.B.') for key in dst_param_keys]
        # Natural sort both sides so the key lists align positionally.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Undo the renaming on both sides; order is preserved.
        src_param_keys = [key.replace('.bn.beta', '-batchnorm_beta') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.gamma', '-batchnorm_gamma') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_mean', '-batchnorm_moving_mean') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_var', '-batchnorm_moving_var') for key in src_param_keys]
        src_param_keys = [key.replace('.conv.weight', '-conv2d_weight') for key in src_param_keys]
        src_param_keys = [key.replace('features.A', 'first-3x3-conv') for key in src_param_keys]
        src_param_keys = [key.replace('features.B', 'last-1x1-conv') for key in src_param_keys]
        src_param_keys = [key.replace('.conv1', '-exp') for key in src_param_keys]
        src_param_keys = [key.replace('.conv2', '-depthwise', ) for key in src_param_keys]
        src_param_keys = [key.replace('.conv3', '-linear') for key in src_param_keys]
        src_param_keys = [key.replace("features.", "seq-") for key in src_param_keys]
        src_param_keys = [key.replace("output.1.", "fc_") for key in src_param_keys]
        src_param_keys = [key.replace(".block", "-block") for key in src_param_keys]
        dst_param_keys = [key.replace('features.A.', 'features.0.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.B.', 'features.6.') for key in dst_param_keys]
    elif src_model in ["preresnet269b"]:
        # The first unit's pre-activation BN has no source counterpart:
        # force-reinitialize it and exclude its keys from the pairwise copy.
        dst_net.features[1][0].body.conv1a.bn.initialize(ctx=ctx, verbose=True, force_reinit=True)
        dst1 = list(filter(re.compile("^features.1.0.body.conv1.bn.").search, dst_param_keys))
        dst_param_keys = [key for key in dst_param_keys if key not in dst1]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Temporary '_aweight' suffix shifts the conv weights' position in the
        # natural sort relative to the BN parameters; undone right after sorting.
        src_param_keys = [re.sub('^classifier_', "output.", key) for key in src_param_keys]
        src_param_keys = [re.sub('^res', "features.", key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv1_weight$', '_conv1_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_weight$', '_conv2_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_weight$', '_conv3_aweight', key) for key in src_param_keys]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        src_param_keys = [re.sub("^output\.", "classifier_", key) for key in src_param_keys]
        src_param_keys = [re.sub("^features\.", "res", key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv1_aweight$', '_conv1_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_aweight$', '_conv2_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_aweight$', '_conv3_weight', key) for key in src_param_keys]
    # Pairwise copy for the non-crunet branches: after the renaming/sorting
    # above, the two key lists must agree element-wise in suffix and shape.
    for src_i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        assert (dst_params[dst_key].shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(src_params[src_key], ctx)
    # Initialize anything still uninitialized (e.g. the excluded BN above).
    for param in dst_net.collect_params().values():
        if param._data is not None:
            continue
        print("param={}".format(param))
        param.initialize(ctx=ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ch(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ext_src_param_keys,
                  ext_src_param_keys2,
                  src_model):
    """
    Convert Gluon model parameters into a Chainer network and save them as NPZ.

    Trainable weights are aligned by renaming + natural sorting and copied
    pairwise into `dst_params[...].array`.  Non-trainable buffers (BN running
    statistics, and 'index'/'mask' buffers for some models) have no entry in
    the Chainer parameter list, so they are written directly onto the Chainer
    links by attribute-path traversal.

    Parameters:
    ----------
    dst_net : chainer.Chain
        Destination Chainer network.
    dst_params_file_path : str
        Path to the output NPZ file.
    dst_params : dict
        Destination parameters (name -> Chainer parameter with `.array`).
    dst_param_keys : list(str)
        Destination parameter names ('/'-separated Chainer paths).
    src_params : dict
        Source Gluon parameters (values expose `._data[0]` NDArrays).
    src_param_keys : list(str)
        Source parameter names ('.'-separated Gluon paths).
    ext_src_param_keys : list(str)
        Extra source keys for BN running statistics (running_mean/running_var).
    ext_src_param_keys2 : list(str)
        Extra source keys for special buffers ('index'/'mask' — model-specific).
    src_model : str
        Source model name; selects model-specific tweaks.
    """
    if src_model.startswith("diares") or src_model.startswith("diapreres"):
        # DIA attention parameters have no Chainer counterpart: drop them.
        src1 = list(filter(re.compile("^features\.[0-9]*\.\d*[1-9]\d*\.attention").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n
    assert (len(src_param_keys) == len(dst_param_keys))
    # Forward renaming pass: temporary prefixes/suffixes ('z...', 'stageN',
    # 'stage0') force the right positions in the natural sort below.
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/final_block/", "features/zfinal_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/W", "/weight") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/body/", "/features/zbody/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_postactiv/", "features/stageN/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_block/", "features/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_conv/", "features/stageN/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/hg/", "/stage1_hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # Natural sort (digit runs compare numerically) on both sides.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Reverse renaming pass: restore the real Chainer key names.
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/zfinal_block/", "features/final_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/weight", "/W") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_postactiv/", "/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/zbody/", "/features/body/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/stageN/final_conv/", "features/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/stage1_hg/", "/hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # Build a lookup from each BN's prefix path to its position so that the
    # running statistics (which are absent from dst_params) can be located on
    # the Chainer side.  For binarized WRNs the BN beta names are derived from
    # the conv weight names instead of taken directly.
    if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
        ext2_src_param_keys = [key.replace('.conv.weight', '.bn.beta') for key in src_param_keys if
                               key.endswith(".conv.weight")]
        ext2_src_param_keys.append("features.4.bn.beta")
        ext2_dst_param_keys = [key.replace("/conv/W", "/bn/beta") for key in dst_param_keys if key.endswith("/conv/W")]
        ext2_dst_param_keys.append("/features/post_activ/bn/beta")
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    else:
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".beta")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/beta")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    # Copy BN running statistics onto the Chainer BN links (avg_mean/avg_var)
    # by walking the attribute path obtained from the lookup above.
    for i, src_key in enumerate(ext_src_param_keys):
        src_key1 = src_key.split(".")[-1]
        src_key2 = ".".join(src_key.split(".")[:-1])
        dst_ind = ext3_src_param_keys[src_key2]
        dst_path = ext3_dst_param_keys[dst_ind]
        obj = dst_net
        for j, sub_path in enumerate(dst_path):
            obj = getattr(obj, sub_path)
        if src_key1 == 'running_mean':
            assert (obj.avg_mean.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.avg_mean.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.avg_mean.shape)
            obj.avg_mean = src_params[src_key]._data[0].asnumpy()
        elif src_key1 == 'running_var':
            assert (obj.avg_var.shape == src_params[src_key].shape)
            obj.avg_var = src_params[src_key]._data[0].asnumpy()
    if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
        # CondenseNet: copy the learned-group-conv 'index' buffers (int32).
        assert (dst_net.output.fc.index.shape == src_params["output.1.index"].shape)
        dst_net.output.fc.index = src_params["output.1.index"]._data[0].asnumpy().astype(np.int32)
        ext_src_param_keys2.remove("output.1.index")
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-2]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-2], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.index.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.index.shape)
            obj.index = src_params[src_key]._data[0].asnumpy().astype(np.int32)
    elif src_model.startswith("xdensenet"):
        # X-DenseNet: copy the expanded-conv 'mask' buffers.
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")] +\
                              [key for key in src_param_keys if key.endswith(".conv2.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")] +\
                              [key for key in dst_param_keys if key.endswith("/conv2/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.mask.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.mask.shape)
            obj.mask = src_params[src_key]._data[0].asnumpy()
    # Pairwise copy of the trainable parameters (shapes must match exactly).
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_params[dst_key].array.shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape)
        dst_params[dst_key].array = src_params[src_key]._data[0].asnumpy()
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape))
    from chainer.serializers import save_npz
    save_npz(
        file=dst_params_file_path,
        obj=dst_net)
def convert_gl2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  finetune,
                  src_model,
                  ctx):
    """
    Convert Gluon model parameters into another Gluon network and save them.

    For a few third-party ('oth_*') source models the key lists are first
    reordered so they align positionally, then weights are copied pairwise.
    A shape mismatch is only tolerated (skipped with a warning) when
    `finetune` is set; otherwise it aborts.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination Gluon network.
    dst_params_file_path : str
        Path to the output parameter file.
    dst_params : dict
        Destination parameters (name -> Gluon parameter).
    dst_param_keys : list(str)
        Destination parameter names.
    src_params : dict
        Source Gluon parameters (values expose `._data[0]`).
    src_param_keys : list(str)
        Source parameter names.
    finetune : bool
        Whether to skip (instead of fail on) shape-mismatched parameters.
    src_model : str
        Source model name; selects the reordering scheme.
    ctx : mx.Context
        Context used when loading parameters.
    """
    def _shift(keys, pattern, to_front=False):
        # Partition `keys` by regex `pattern` (re.search semantics) and move
        # the matching keys to the back (default) or to the front.
        hits = list(filter(re.compile(pattern).search, keys))
        misses = [k for k in keys if k not in hits]
        return (hits + misses) if to_front else (misses + hits)

    if src_model.startswith("oth_danet_resnet"):
        # Push the DANet head parameters to the end, in this exact order.
        for pattern in ("^head.sa.gamma", "^head.conv51", "^head.conv6",
                        "^head.conv5c", "^head.sc", "^head.conv52",
                        "^head.conv7", "^head.conv8"):
            src_param_keys = _shift(src_param_keys, pattern)
    elif src_model.startswith("oth_icnet_resnet50_citys"):
        src_param_keys = _shift(src_param_keys, "^conv_sub1", to_front=True)
        src_param_keys = _shift(src_param_keys, "^head")
    elif src_model.startswith("oth_fastscnn_citys"):
        src_param_keys = _shift(src_param_keys, "^feature_fusion")
        for pattern in ("^fusion", "^fusion.low_pw_conv.bn", "^fusion.high_conv.bn"):
            dst_param_keys = _shift(dst_param_keys, pattern)

    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        if src_params[src_key].shape != dst_params[dst_key].shape:
            logging.warning(
                "dst_param.shape != src_param.shape, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
            if not finetune:
                raise ValueError
            continue
        if src_key.split('.')[-1] != dst_key.split('.')[-1]:
            # Suffix disagreement is suspicious but not fatal.
            logging.warning(
                "dst_key.suff != src_key.suff, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
        dst_params[dst_key]._load_init(src_params[src_key]._data[0], ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ke(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert Gluon model parameters into a Keras (MXNet backend) network.

    Keys are aligned via temporary renames + natural sorting, then each source
    NDArray is transposed into the Keras layout and bound to the destination
    weight.  Grouped convolutions ('convgroup') are represented on the Keras
    side by one weight per group, so the source tensor is split along axis 0.

    Parameters:
    ----------
    dst_net : keras.Model
        Destination Keras network.
    dst_params_file_path : str
        Path to the output weight file.
    dst_params : dict
        Destination entries: name -> (layer, weight) pair.
    dst_param_keys : list(str)
        Destination weight names ('/'-separated).
    src_params : dict
        Source Gluon parameters (values expose `._data[0]` NDArrays).
    src_param_keys : list(str)
        Source parameter names.
    """
    import mxnet as mx
    # Temporary 'stageN'/'stage0' prefixes force the right natural-sort order.
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    # Collapse the per-group keys of grouped convolutions to one key per
    # parameter, preserving first-occurrence order.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    # dst_param_keys = list(np.unique(dst_param_keys))
    assert (len(src_param_keys) == len(dst_param_keys))
    def process_width(src_key, dst_key, src_weight):
        # Transpose the Gluon tensor into the Keras layout expected by the
        # destination layer, verify the shape, and bind the value.
        dst_layer = dst_params[dst_key][0]
        dst_weight = dst_params[dst_key][1]
        # Gluon conv weights are OIHW; channels_last Keras expects HWIO.
        if (dst_layer.__class__.__name__ in ["Conv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 1, 0))
        # Depthwise kernels use HWOI instead.
        if (dst_layer.__class__.__name__ in ["DepthwiseConv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 0, 1))
        if (dst_layer.__class__.__name__ in ["Dense"]) and dst_key.endswith("kernel1"):
            src_weight = np.transpose(src_weight, (1, 0))
        assert (dst_weight._keras_shape == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, dst_weight._keras_shape)
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_weight.shape, dst_weight._keras_shape))
        dst_weight.bind(mx.nd.array(src_weight))
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_key.find("convgroup") >= 0:
            # Grouped conv: split the source tensor along the output-channel
            # axis and feed each chunk to its per-group Keras weight.
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel1")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias1")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)
    dst_net.save_weights(dst_params_file_path)
def convert_gl2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert Gluon model parameters into a TensorFlow 1.x graph and save them.

    Keys are aligned via temporary renames + natural sorting; each source
    NDArray is transposed from the Gluon layout into the TF layout and
    assigned to the corresponding variable inside a session.  Grouped
    convolutions ('convgroup') are split along axis 0, one chunk per group.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output parameter file.
    dst_params : dict
        Destination entries: name -> tf.Variable.
    dst_param_keys : list(str)
        Destination variable names (e.g. '.../kernel:0').
    src_params : dict
        Source Gluon parameters (values expose `._data[0]` NDArrays).
    src_param_keys : list(str)
        Source parameter names.
    """
    import mxnet as mx
    # Temporary renames ('weight', 'stageN', 'stage0') force the natural sort
    # below to order both key lists identically; undone right after sorting.
    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    # Collapse per-group keys of grouped convolutions to one key per
    # parameter, preserving first-occurrence order.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))
    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        def process_width(src_key, dst_key, src_weight):
            # Transpose the Gluon tensor (OIHW conv / OI dense) into the TF
            # layout (HWIO, or HWOI for depthwise), check shape, and assign.
            if len(src_weight.shape) == 4:
                if dst_key.split("/")[-1][:-2] == "dw_kernel":
                    src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
                else:
                    src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
            elif len(src_weight.shape) == 2:
                src_weight = np.transpose(src_weight, axes=(1, 0))
            assert (tuple(dst_params[dst_key].get_shape().as_list()) == src_weight.shape)
            sess.run(dst_params[dst_key].assign(src_weight))
            # print(dst_params[dst_key].eval(sess))
        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            if dst_key.find("convgroup") >= 0:
                # Grouped conv: split along the output-channel axis and feed
                # each chunk to its per-group TF variable.
                dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
                dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
                if src_key.endswith("weight"):
                    dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
                elif src_key.endswith("bias"):
                    dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
                groups = len(dst_keys)
                src_weight0 = src_params[src_key]._data[0]
                src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
                for gi in range(groups):
                    src_weight_gi = src_weight0_list[gi].asnumpy()
                    dst_key_gi = dst_keys[gi]
                    process_width(src_key, dst_key_gi, src_weight_gi)
            else:
                src_weight = src_params[src_key]._data[0].asnumpy()
                process_width(src_key, dst_key, src_weight)
        # saver = tf.train.Saver()
        # saver.save(
        #     sess=sess,
        #     save_path=dst_params_file_path)
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_gl2tf2(dst_net,
                   dst_params_file_path,
                   dst_params,
                   dst_param_keys,
                   src_params,
                   src_param_keys,
                   src_model):
    """
    Convert Gluon model parameters into a TensorFlow 2.x (Keras) network.

    Keys are aligned via temporary renames + natural sorting; each source
    NDArray is transposed from the Gluon layout into the TF layout and
    assigned to the destination variable.  Grouped convolutions ('convgroup')
    are split along axis 0, one chunk per group.

    Parameters:
    ----------
    dst_net : tf.keras.Model
        Destination TF2 network.
    dst_params_file_path : str
        Path to the output weight file.
    dst_params : dict
        Destination entries: name -> tf.Variable.
    dst_param_keys : list(str)
        Destination variable names (e.g. '.../kernel:0').
    src_params : dict
        Source Gluon parameters (values expose `._data[0]` NDArrays).
    src_param_keys : list(str)
        Source parameter names.
    src_model : str
        Source model name; selects model-specific renames.
    """
    # Forward renaming pass: temporary prefixes ('a...', 'z...', 'stageN',
    # 'stage0') force the right positions in the natural sort; every rename
    # is undone after sorting, so only the ORDER of the keys changes.
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".transition.", ".atransition.") for key in src_param_keys]
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".atransition.", ".transition.") for key in src_param_keys]
    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/depthwise_kernel:", "/weight_depthwise:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/transition/", "/atransition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/dw_conv/', '/z_dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/down", "features/z_down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("da_net/head/", "z_da_net/head/") for key in dst_param_keys]
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Reverse renaming pass: restore the real destination variable names.
    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_depthwise:", "/depthwise_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/atransition/", "/transition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/z_dw_conv/', '/dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/z_down", "features/down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("z_da_net/head/", "da_net/head/") for key in dst_param_keys]
    # Collapse per-group keys of grouped convolutions to one key per
    # parameter, preserving first-occurrence order.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))
    def process_width(src_key, dst_key, src_weight):
        # Transpose the Gluon tensor into the TF layout (HWIO conv, HWOI
        # depthwise, IO dense, WIO 1D conv), check the shape, and assign.
        if len(src_weight.shape) == 4:
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
        elif len(src_weight.shape) == 2:
            src_weight = np.transpose(src_weight, axes=(1, 0))
        elif len(src_weight.shape) == 3:
            # Jasper/QuartzNet filterbank buffers ('fb') keep the source layout.
            if not ((src_model.startswith("jasper") or src_model.startswith("quartznet")) and
                    dst_key.split("/")[-1][:-2] == "fb"):
                src_weight = np.transpose(src_weight, axes=(2, 1, 0))
                if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                    # 1D depthwise kernels gain a trailing singleton axis in TF.
                    assert(len(dst_params[dst_key].shape) == 4)
                    src_weight = np.expand_dims(src_weight, -1)
        dst_weight = dst_params[dst_key]
        assert (tuple(dst_weight.shape) == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, tuple(dst_weight.shape))
        dst_weight.assign(src_weight)
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        # print("src_key={},\tsrc_key2={},\tdst_key={}".format(src_key, src_params[src_key].name, dst_key))
        if dst_key.find("convgroup") >= 0:
            # Grouped conv: split along the output-channel axis and feed each
            # chunk to its per-group TF variable.
            import mxnet as mx
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)
    dst_net.save_weights(dst_params_file_path)
def convert_pt2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  src_model,
                  dst_model):
    """
    Convert PyTorch model parameters to another PyTorch model.

    For most models a positional one-to-one copy is enough; for the model
    families special-cased below, the source/destination key lists are first
    reordered so that matching parameters line up positionally.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output parameter file (written with `torch.save`).
    dst_params : dict
        Destination parameters (name -> tensor), updated in place.
    dst_param_keys : list of str
        Ordered destination parameter names.
    src_params : dict
        Source parameters (name -> tensor).
    src_param_keys : list of str
        Ordered source parameter names.
    src_model : str
        Source model name (selects key-reordering rules).
    dst_model : str
        Destination model name (selects key-reordering rules).
    """
    import torch
    # NOTE: all regexps containing backslashes are raw strings now; the
    # original non-raw "\." escapes are invalid escape sequences and raise
    # SyntaxWarning on Python 3.12+.
    if src_model.startswith("oth_quartznet") or src_model.startswith("oth_jasper"):
        src1 = list(filter(re.compile(r"\.res\.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        dst1 = list(filter(re.compile(r"\.identity_block\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
    elif src_model.startswith("oth_dicenet"):
        src1 = list(filter(re.compile(r"\.conv_height\.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile(r"\.conv_width\.").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile(r"\.linear_comb_layer\.").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile(r"\.proj_layer\.").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src_param_keys = src4n + src1 + src2 + src3 + src4
        dst1 = list(filter(re.compile(r"\.h_conv\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile(r"\.w_conv\.").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst3 = list(filter(re.compile(r"\.att\.").search, dst2n))
        dst3n = [key for key in dst2n if key not in dst3]
        dst4 = list(filter(re.compile(r"\.proj_conv\.").search, dst3n))
        dst4n = [key for key in dst3n if key not in dst4]
        dst_param_keys = dst4n + dst1 + dst2 + dst3 + dst4
    elif src_model.startswith("oth_proxyless"):
        # Move one key to the front and another towards the tail (manual fixup).
        src1 = src_param_keys[5]
        del src_param_keys[5]
        src_param_keys.insert(0, src1)
        src2 = src_param_keys[-3]
        del src_param_keys[-3]
        src_param_keys.insert(-7, src2)
    elif src_model.startswith("oth_scnet"):
        src1 = list(filter(re.compile(".k1.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile(".scconv.").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src_param_keys = src2n + src1 + src2
        dst1 = list(filter(re.compile(".conv2a.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile(".conv2b.").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst_param_keys = dst2n + dst1 + dst2
    elif src_model == "oth_bisenet":
        src1 = list(filter(re.compile("^cp.conv_avg").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile("^cp.arm32").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile("^cp.conv_head32").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile("^cp.arm16").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src5 = list(filter(re.compile("^cp.conv_head16").search, src4n))
        src5n = [key for key in src4n if key not in src5]
        src6 = list(filter(re.compile("^ffm").search, src5n))
        src6n = [key for key in src5n if key not in src6]
        src_param_keys = src6n + src1 + src2 + src3 + src4 + src5 + src6
        dst1 = list(filter(re.compile("^pool").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
    elif src_model.startswith("oth_dla"):
        src1 = list(filter(re.compile(r"\.project").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1 + src1n
        dst1 = list(filter(re.compile(r"\.project_conv").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1 + dst1n
    elif dst_model == "ntsnet":
        src1 = list(filter(re.compile("^proposal_net").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1 + src1n
        dst1 = list(filter(re.compile(r"^navigator_unit\.branch\d+\.down").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile(r"^navigator_unit\.branch\d+\.tidy").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst_param_keys = dst1 + dst2 + dst2n
    elif dst_model == "fishnet150":
        src1 = list(filter(re.compile(r"^(conv|fish\.fish\.[0-2])").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile(r"^fish\.fish\.6\.1").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile(r"^fish\.fish\.5\.1").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile(r"^fish\.fish\.4\.1").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src5 = list(filter(re.compile(r"^fish\.fish\.3\.[0-1]").search, src4n))
        src5n = [key for key in src4n if key not in src5]
        src6 = list(filter(re.compile(r"^fish\.fish\.3\.3").search, src5n))
        src6n = [key for key in src5n if key not in src6]
        src7 = list(filter(re.compile(r"^fish\.fish\.[3-6]").search, src6n))
        src7n = [key for key in src6n if key not in src7]
        src8 = list(filter(re.compile(r"^fish\.fish\.9\.1").search, src7n))
        src8n = [key for key in src7n if key not in src8]
        src9 = list(filter(re.compile(r"^fish\.fish\.8\.1").search, src8n))
        src9n = [key for key in src8n if key not in src9]
        src10 = list(filter(re.compile(r"^fish\.fish\.7\.1").search, src9n))
        src10n = [key for key in src9n if key not in src10]
        src_param_keys = src1 + src2 + src3 + src4 + src5 + src6 + src7 + src8 + src9 + src10 + src10n
    elif dst_model == "bam_resnet50":
        src_bams = list(filter(re.compile("^bam").search, src_param_keys))
        src_param_keys = [key for key in src_param_keys if key not in src_bams]
        src_param_keys = src_param_keys + src_bams
        dst_bams = list(filter(re.compile("^features.stage[0-9].unit1.bam.").search, dst_param_keys))
        dst_param_keys = [key for key in dst_param_keys if key not in dst_bams]
        dst_param_keys = dst_param_keys + dst_bams
    elif dst_model.startswith("sinet"):
        src1 = list(filter(re.compile(r"\.vertical.weight").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        src2 = list(filter(re.compile(r"\.horizontal.weight").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src_param_keys = src2n + src2
        src3 = list(filter(re.compile(r"\.B_v\.").search, src_param_keys))
        src3n = [key for key in src_param_keys if key not in src3]
        src_param_keys = src3n + src3
        src4 = list(filter(re.compile(r"\.B_h\.").search, src_param_keys))
        src4n = [key for key in src_param_keys if key not in src4]
        src_param_keys = src4n + src4
        src5 = list(filter(re.compile(r"bn_4\.").search, src_param_keys))
        src5n = [key for key in src_param_keys if key not in src5]
        src_param_keys = src5n + src5
        src6 = list(filter(re.compile(r"bn_3\.").search, src_param_keys))
        src6n = [key for key in src_param_keys if key not in src6]
        src_param_keys = src6n + src6
        dst1 = list(filter(re.compile(r"\.v_conv.conv\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
        dst2 = list(filter(re.compile(r"\.h_conv.conv\.").search, dst_param_keys))
        dst2n = [key for key in dst_param_keys if key not in dst2]
        dst_param_keys = dst2n + dst2
        dst3 = list(filter(re.compile(r"\.v_conv.bn\.").search, dst_param_keys))
        dst3n = [key for key in dst_param_keys if key not in dst3]
        dst_param_keys = dst3n + dst3
        dst4 = list(filter(re.compile(r"\.h_conv.bn\.").search, dst_param_keys))
        dst4n = [key for key in dst_param_keys if key not in dst4]
        dst_param_keys = dst4n + dst4
        dst5 = list(filter(re.compile(r"decoder.decode1.bn\.").search, dst_param_keys))
        dst5n = [key for key in dst_param_keys if key not in dst5]
        dst_param_keys = dst5n + dst5
        dst6 = list(filter(re.compile(r"decoder.decode2.bn\.").search, dst_param_keys))
        dst6n = [key for key in dst_param_keys if key not in dst6]
        dst_param_keys = dst6n + dst6
    elif src_model.startswith("oth_ibppose"):
        def sort_hg(src2):
            # Reorder hourglass sub-block keys into the destination traversal order.
            src2b1 = list(filter(re.compile("^hourglass.[0-9].hg.0.1.").search, src2))
            src2b2 = list(filter(re.compile("^hourglass.[0-9].hg.1.1.").search, src2))
            src2b3 = list(filter(re.compile("^hourglass.[0-9].hg.2.1.").search, src2))
            src2b4 = list(filter(re.compile("^hourglass.[0-9].hg.3.1.").search, src2))
            src2b5 = list(filter(re.compile("^hourglass.[0-9].hg.3.2.").search, src2))
            src2b6 = list(filter(re.compile("^hourglass.[0-9].hg.3.3.").search, src2))
            src2b7 = list(filter(re.compile("^hourglass.[0-9].hg.2.2.").search, src2))
            src2b8 = list(filter(re.compile("^hourglass.[0-9].hg.2.3.").search, src2))
            src2b9 = list(filter(re.compile("^hourglass.[0-9].hg.1.2.").search, src2))
            src2b10 = list(filter(re.compile("^hourglass.[0-9].hg.1.3.").search, src2))
            src2b11 = list(filter(re.compile("^hourglass.[0-9].hg.0.2.").search, src2))
            src2b12 = list(filter(re.compile("^hourglass.[0-9].hg.0.3.").search, src2))
            src2b13 = list(filter(re.compile("^hourglass.[0-9].hg.0.0.").search, src2))
            src2b14 = list(filter(re.compile("^hourglass.[0-9].hg.1.0.").search, src2))
            src2b15 = list(filter(re.compile("^hourglass.[0-9].hg.2.0.").search, src2))
            src2b16 = list(filter(re.compile("^hourglass.[0-9].hg.3.0.").search, src2))
            src2b17 = list(filter(re.compile("^hourglass.[0-9].hg.3.4.").search, src2))
            return src2b1 + src2b2 + src2b3 + src2b4 +\
                   src2b11 + src2b12 + src2b9 + src2b10 + src2b7 + src2b8 + src2b5 + src2b6 +\
                   src2b13 + src2b14 + src2b15 + src2b16 + src2b17
        src1 = list(filter(re.compile("^pre.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        src2 = list(filter(re.compile("^hourglass.").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src2b1 = sort_hg(list(filter(re.compile("^hourglass.0.hg.").search, src2)))
        src2b2 = sort_hg(list(filter(re.compile("^hourglass.1.hg.").search, src2)))
        src2b3 = sort_hg(list(filter(re.compile("^hourglass.2.hg.").search, src2)))
        src2b4 = sort_hg(list(filter(re.compile("^hourglass.3.hg.").search, src2)))
        src_param_keys = src2n + src2b1 + src2b2 + src2b3 + src2b4
        src3 = list(filter(re.compile("^features.[0-9].before_regress").search, src_param_keys))
        src3n = [key for key in src_param_keys if key not in src3]
        src3b = list(filter(re.compile("^features.[0-9].before_regress.0.").search, src3))
        src_param_keys = src3n + src3b
        src4 = list(filter(re.compile("^outs.[0-9].").search, src_param_keys))
        src4n = [key for key in src_param_keys if key not in src4]
        src4b = list(filter(re.compile("^outs.[0-9].0.").search, src4))
        src_param_keys = src4n + src4b
        src5 = list(filter(re.compile("^merge_features.[0-9].").search, src_param_keys))
        src5n = [key for key in src_param_keys if key not in src5]
        src5b = list(filter(re.compile("^merge_features.[0-9].0.").search, src5))
        src_param_keys = src5n + src5b
        src6 = list(filter(re.compile("^merge_preds.[0-9].").search, src_param_keys))
        src6n = [key for key in src_param_keys if key not in src6]
        src6b = list(filter(re.compile("^merge_preds.[0-9].0.").search, src6))
        src_param_keys = src6n + src6b
        dst1 = list(filter(re.compile("^backbone.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
        dst2 = list(filter(re.compile("^decoder.pass[1-9].hg.").search, dst_param_keys))
        dst2n = [key for key in dst_param_keys if key not in dst2]
        dst_param_keys = dst2n + dst2
        dst3 = list(filter(re.compile("^decoder.pass[1-9].pre_block.").search, dst_param_keys))
        dst3n = [key for key in dst_param_keys if key not in dst3]
        dst_param_keys = dst3n + dst3
        dst4 = list(filter(re.compile("^decoder.pass[1-9].post_block.").search, dst_param_keys))
        dst4n = [key for key in dst_param_keys if key not in dst4]
        dst_param_keys = dst4n + dst4
        dst5 = list(filter(re.compile("^decoder.pass[1-9].pre_merge_block.").search, dst_param_keys))
        dst5n = [key for key in dst_param_keys if key not in dst5]
        dst_param_keys = dst5n + dst5
        dst6 = list(filter(re.compile("^decoder.pass[1-9].post_merge_block.").search, dst_param_keys))
        dst6n = [key for key in dst_param_keys if key not in dst6]
        dst_param_keys = dst6n + dst6
    assert (len(src_param_keys) == len(dst_param_keys))
    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        if (src_model == "oth_shufflenetv2_wd2" and dst_model == "shufflenetv2_wd2") and \
                (src_key == "network.8.weight"):
            # This 1x1-conv weight maps to a linear layer: drop spatial dims.
            dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy()[:, :, 0, 0])
        else:
            assert (tuple(dst_params[dst_key].size()) == tuple(src_params[src_key].size())), \
                "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, tuple(src_params[src_key].size()), tuple(dst_params[dst_key].size()))
            assert (dst_key.split('.')[-1] == src_key.split('.')[-1])
            dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy())
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)
def convert_gl2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert Gluon model parameters to a PyTorch model.

    Source and destination key lists are assumed to be aligned positionally;
    each Gluon parameter is copied into the matching PyTorch tensor slot and
    the resulting dict is written with `torch.save`.
    """
    import torch
    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        src_param = src_params[src_key]
        # Shapes must agree exactly before copying.
        assert (tuple(dst_params[dst_key].size()) == src_param.shape)
        dst_params[dst_key] = torch.from_numpy(src_param._data[0].asnumpy())
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)
def convert_pt2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert PyTorch model parameters to a Gluon model.

    Keys are matched positionally; each PyTorch tensor is loaded into the
    corresponding Gluon parameter and the network is saved to file.
    """
    import mxnet as mx
    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        src_shape = tuple(src_params[src_key].size())
        dst_shape = dst_params[dst_key].shape
        assert (dst_shape == src_shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_shape, dst_shape)
        dst_params[dst_key]._load_init(mx.nd.array(src_params[src_key].numpy(), ctx), ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert TensorFlow model parameters to another TensorFlow model.

    Source keys are temporarily renamed into the destination naming scheme,
    both key lists are natural-sorted so they align positionally, the renames
    are undone, and then the values are assigned inside a TF1 session.
    """
    import re

    def natural_key(name):
        # Natural sort: digit runs compare numerically (zero-padded), text lexicographically.
        return ["{:10}".format(int(tok)) if tok.isdigit() else tok
                for tok in re.findall(r"[^0-9]|[0-9]+", name)]

    # Forward renames: map source names onto the destination naming scheme.
    for old, new in [("/W:", "/kernel:"),
                     ("/b:", "/bias:"),
                     ("linear/", "output/"),
                     ("stage", "features/stage")]:
        src_param_keys = [key.replace(old, new) for key in src_param_keys]
    src_param_keys = [re.sub("^conv1/", "features/init_block/conv/", key) for key in src_param_keys]
    src_param_keys = [re.sub("^conv5/", "features/final_block/conv/", key) for key in src_param_keys]
    for old, new in [("/dconv_bn/", "/dconv/bn/"),
                     ("/shortcut_dconv_bn/", "/shortcut_dconv/bn/")]:
        src_param_keys = [key.replace(old, new) for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=natural_key)
    dst_param_keys.sort()
    dst_param_keys.sort(key=natural_key)

    # Undo the renames so the original source dict can still be indexed.
    for old, new in [("/kernel:", "/W:"),
                     ("/bias:", "/b:"),
                     ("output/", "linear/"),
                     ("features/stage", "stage"),
                     ("features/init_block/conv/", "conv1/"),
                     ("features/final_block/conv/", "conv5/"),
                     ("/dconv/bn/", "/dconv_bn/"),
                     ("/shortcut_dconv/bn/", "/shortcut_dconv_bn/")]:
        src_param_keys = [key.replace(old, new) for key in src_param_keys]

    assert (len(src_param_keys) == len(dst_param_keys))

    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for src_key, dst_key in zip(src_param_keys, dst_param_keys):
            assert (src_params[src_key].shape == tuple(dst_params[dst_key].get_shape().as_list()))
            sess.run(dst_params[dst_key].assign(src_params[src_key]))
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_tf2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert TensorFlow model parameters to a Gluon model.

    Source keys are temporarily renamed into the destination scheme, both key
    lists are natural-sorted to align them positionally, the renames are
    undone, and the weights are transposed from TF to Gluon layout and loaded.
    """
    import mxnet as mx

    def natural_key(name):
        # Natural sort: digit runs compare numerically (zero-padded), text lexicographically.
        return ["{:10}".format(int(tok)) if tok.isdigit() else tok
                for tok in re.findall(r"[^0-9]|[0-9]+", name)]

    rename_pairs = [
        ("/kernel:", "/weight:"),
        ("/dw_kernel:", "/weight_dw:"),
        ("/post_activ/", "/stageN/post_activ/"),
        ("/final_block/", "/stageN/final_block/"),
        ("/stem1_unit/", "/stage0/stem1_unit/"),
        ("/stem2_unit/", "/stage0/stem2_unit/"),
    ]
    for old, new in rename_pairs:
        src_param_keys = [key.replace(old, new) for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=natural_key)
    dst_param_keys.sort()
    dst_param_keys.sort(key=natural_key)

    # Undo the renames so the original source dict can still be indexed.
    for old, new in rename_pairs:
        src_param_keys = [key.replace(new, old) for key in src_param_keys]

    assert (len(src_param_keys) == len(dst_param_keys))

    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        src_weight = src_params[src_key]
        rank = len(src_weight.shape)
        if rank == 4:
            # Conv kernels: reorder axes from TF to Gluon layout; depthwise
            # kernels use a different axis permutation.
            if src_key.split("/")[-1][:-2] == "dw_kernel":
                dst_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                dst_weight = np.transpose(src_weight, axes=(3, 2, 0, 1))
        elif rank == 2:
            dst_weight = np.transpose(src_weight, axes=(1, 0))
        else:
            dst_weight = src_weight
        assert (dst_weight.shape == dst_params[dst_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, dst_weight.shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(dst_weight, ctx), ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf22tfl(src_net,
                    dst_params_file_path):
    """
    Convert TensorFlow 2 (Keras) model to a TFLite model file.

    Parameters:
    ----------
    src_net : tf.keras.Model
        Source Keras model.
    dst_params_file_path : str
        Path to the destination TFLite flatbuffer file.
    """
    import tensorflow as tf
    converter = tf.lite.TFLiteConverter.from_keras_model(src_net)
    tflite_model = converter.convert()
    # Fixed: use a context manager so the file handle is closed (and the data
    # flushed) even on error; the original `open(...).write(...)` leaked it.
    with open(dst_params_file_path, "wb") as out_file:
        out_file.write(tflite_model)
    # Optional sanity check, kept for reference:
    # batch_size = 1
    # input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if
    #                src_net.data_format == "channels_first" else
    #                (batch_size, src_net.in_size[0], src_net.in_size[1], 3))
    # input_data = tf.random.normal(input_shape)
    # tf_results = src_net(input_data)
    # interpreter = tf.lite.Interpreter(model_content=tflite_model)
    # interpreter.allocate_tensors()
    # input_details = interpreter.get_input_details()
    # output_details = interpreter.get_output_details()
    # input_data = np.array(np.random.random_sample(input_details[0]["shape"]), dtype=np.float32)
    # interpreter.set_tensor(input_details[0]["index"], input_data)
    # interpreter.invoke()
    # tflite_results = interpreter.get_tensor(output_details[0]["index"])
    # for tf_result, tflite_result in zip(tf_results, tflite_results):
    #     np.testing.assert_almost_equal(tf_result.numpy(), tflite_result, decimal=5)
def _init_ctx(args):
    """
    Create an MXNet CPU context when either framework side requires MXNet;
    otherwise return None.
    """
    mx_fwks = ("gluon", "mxnet", "keras")
    if (args.src_fwk in mx_fwks) or (args.dst_fwk in mx_fwks):
        import mxnet as mx
        return mx.cpu()
    return None
def _prepare_src_model(args, ctx, use_cuda):
    """Instantiate the source model/params from the parsed script arguments."""
    src_kwargs = dict(
        src_fwk=args.src_fwk,
        src_model=args.src_model,
        src_params_file_path=args.src_params,
        dst_fwk=args.dst_fwk,
        ctx=ctx,
        use_cuda=use_cuda,
        load_ignore_extra=args.load_ignore_extra,
        remove_module=args.remove_module,
        num_classes=args.src_num_classes,
        in_channels=args.src_in_channels)
    return prepare_src_model(**src_kwargs)
def _prepare_dst_model(args, ctx, use_cuda):
    """Instantiate the destination model/params from the parsed script arguments."""
    dst_kwargs = dict(
        dst_fwk=args.dst_fwk,
        dst_model=args.dst_model,
        src_fwk=args.src_fwk,
        ctx=ctx,
        use_cuda=use_cuda,
        num_classes=args.dst_num_classes,
        in_channels=args.dst_in_channels,
        model_type=args.model_type)
    return prepare_dst_model(**dst_kwargs)
def update_and_initialize_logging(args):
    """
    Update arguments and initialize logging.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    """
    packages = []
    pip_packages = []
    if (args.src_fwk == "gluon") or (args.dst_fwk == "gluon"):
        # Fixed: the original appended the single string "mxnet, numpy";
        # every sibling branch appends individual package names.
        packages += ["mxnet", "numpy"]
        pip_packages += ["mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "pytorch") or (args.dst_fwk == "pytorch"):
        packages += ["torch", "torchvision"]
    if (args.src_fwk == "chainer") or (args.dst_fwk == "chainer"):
        packages += ["chainer"]
        pip_packages += ["cupy-cuda110", "cupy-cuda112", "chainer"]
    if (args.src_fwk == "keras") or (args.dst_fwk == "keras"):
        packages += ["keras"]
        pip_packages += ["keras", "keras-mxnet", "mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "tensorflow") or (args.dst_fwk == "tensorflow"):
        packages += ["tensorflow-gpu"]
        pip_packages += ["tensorflow", "tensorflow-gpu", "tensorpack"]
    if (args.src_fwk == "tf2") or (args.dst_fwk == "tf2") or (args.dst_fwk == "tfl"):
        packages += ["tensorflow"]
        pip_packages += ["tensorflow", "tensorflow-gpu"]
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=packages,
        log_pip_packages=pip_packages)
def main():
    """
    Main body of script: parse arguments, prepare source/destination models,
    and dispatch to the converter for the given framework pair.
    """
    args = parse_args()
    ctx = None
    use_cuda = False
    # NOTE(review): for TF2 destinations the model is deliberately built
    # before logging is initialized -- confirm why; the order looks intentional.
    if args.dst_fwk == "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    update_and_initialize_logging(args=args)
    ctx = _init_ctx(args)
    src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net =\
        _prepare_src_model(args, ctx, use_cuda)
    if args.dst_fwk != "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    # Sanity-check parameter counts; the expected relation depends on the
    # framework pair (e.g. grouped convolutions split one key into several).
    if ((args.dst_fwk in ["keras", "tensorflow", "tf2"]) and any([s.find("convgroup") >= 0 for s in dst_param_keys]))\
            or ((args.src_fwk == "mxnet") and (args.src_model in ["crunet56", "crunet116", "preresnet269b"])):
        assert (len(src_param_keys) <= len(dst_param_keys))
    elif ((args.dst_fwk == "chainer") and
          (args.src_model.startswith("diaresnet") or args.src_model.startswith("diapreresnet"))) or\
            args.src_model.startswith("oth_ibppose"):
        assert (len(src_param_keys) >= len(dst_param_keys))
    elif args.dst_fwk == "tfl":
        pass
    else:
        assert (len(src_param_keys) == len(dst_param_keys))
    # Dispatch on the (source framework, destination framework) pair.
    if args.src_fwk == "gluon" and args.dst_fwk == "gluon":
        convert_gl2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            finetune=((args.src_num_classes != args.dst_num_classes) or (args.src_in_channels != args.dst_in_channels)),
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "pytorch":
        convert_pt2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            dst_model=args.dst_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "pytorch":
        convert_gl2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "chainer":
        convert_gl2ch(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ext_src_param_keys=ext_src_param_keys,
            ext_src_param_keys2=ext_src_param_keys2,
            src_model=args.src_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "keras":
        convert_gl2ke(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tensorflow":
        convert_gl2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tf2":
        convert_gl2tf2(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "gluon":
        convert_pt2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "mxnet" and args.dst_fwk == "gluon":
        convert_mx2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "tensorflow":
        convert_tf2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "gluon":
        convert_tf2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "tf2" and args.dst_fwk == "tfl":
        convert_tf22tfl(
            src_net=src_net,
            dst_params_file_path=args.dst_params)
    else:
        raise NotImplementedError
    logging.info("Convert {}-model {} into {}-model {}".format(
        args.src_fwk, args.src_model, args.dst_fwk, args.dst_model))
if __name__ == '__main__':
main()
| 87,933 | 51.435301 | 125 | py |
imgclsmob | imgclsmob-master/train_gl_mealv2.py | """
Script for training a classification model with MEAL v2 teacher-student distillation on MXNet/Gluon.
"""
import argparse
import time
import logging
import os
import random
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model, validate
from gluon.utils import report_accuracy, get_composite_metric, get_metric_name, get_initializer, get_loss
from gluon.metrics.metrics import LossValue
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_train_data_source, get_val_data_source
from gluon.dataset_utils import get_batch_fn
from gluon.gluoncv2.models.common import Concurrent
from gluon.distillation import MealDiscriminator, MealAdvLoss
def add_train_cls_parser_arguments(parser):
    """
    Create python script parameters (for training/classification specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # --- Model selection, checkpoints, and distillation setup ---
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--teacher-models",
        type=str,
        help="teacher model names to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type for training")
    parser.add_argument(
        '--not-hybridize',
        action='store_true',
        help='do not hybridize model')
    parser.add_argument(
        '--not-discriminator',
        action='store_true',
        help='do not use discriminator')
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    # --- Initialization and hardware ---
    parser.add_argument(
        "--initializer",
        type=str,
        default="MSRAPrelu",
        help="initializer name. options are MSRAPrelu, Xavier and Xavier-gaussian-out-2")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    # --- Batching and training duration ---
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    # --- Optimizer and learning-rate schedule ---
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--dlr-factor",
        type=float,
        default=1.0,
        help="discriminator learning rate factor")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    # --- Regularization ---
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    # --- Data augmentation strategies ---
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=12,
        help="number of epochs without mixup at the end of training")
    # --- Logging and checkpoint saving ---
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="mxnet-cu110, mxnet-cu112",
        help="list of pip packages for logging")
    # --- Fine-tuning ---
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Parse python script parameters (common part).
    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    # Two-stage parsing: the dataset/work-dir pair is resolved first so that
    # the dataset metainfo and the training subpart can register their own
    # command-line options before the final parse.
    parser = argparse.ArgumentParser(
        description="Train a model for image classification (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    known_args, _ = parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=known_args.work_dir)
    add_train_cls_parser_arguments(parser)
    return parser.parse_args()
def init_rand(seed):
    """
    Initialize all random generators by seed.
    Parameters:
    ----------
    seed : int
        Seed value. Non-positive values request a randomly generated seed.
    Returns:
    -------
    int
        Generated seed value (always positive, so it can be reproduced).
    """
    if seed <= 0:
        # Draw from [1, 10000) so the generated seed is never 0: a logged seed
        # of 0 would be treated as "generate randomly" on a re-run and the
        # experiment could not be reproduced.
        seed = np.random.randint(1, 10000)
    random.seed(seed)
    np.random.seed(seed)
    mx.random.seed(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Prepare trainer.
    Parameters:
    ----------
    net : HybridBlock
        Model.
    optimizer_name : str
        Name of optimizer.
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode.
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays.
    lr_decay_epoch : str
        Epoches at which learning rate decays.
    lr_decay : float
        Decay rate of learning rate.
    target_lr : float
        Final learning rate.
    poly_power : float
        Power value for poly LR scheduler.
    warmup_epochs : int
        Number of warmup epochs.
    warmup_lr : float
        Starting warmup learning rate.
    warmup_mode : str
        Learning rate scheduler warmup mode.
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    num_training_samples : int
        Number of training samples in dataset.
    dtype : str
        Base data type for tensors.
    gamma_wd_mult : float
        Weight decay multiplier for batchnorm gamma.
    beta_wd_mult : float
        Weight decay multiplier for batchnorm beta.
    bias_wd_mult : float
        Weight decay multiplier for bias.
    state_file_path : str, default None
        Path for file with trainer state.
    Returns:
    -------
    Trainer
        Trainer.
    LRScheduler
        Learning rate scheduler.
    """
    # Optionally rescale weight decay for batchnorm gamma/beta and bias
    # parameters, selected by a regexp over parameter names.
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params(".*gamma").items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params(".*beta").items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params(".*bias").items():
            v.wd_mult = bias_wd_mult
    # A positive decay period generates evenly spaced decay epochs; otherwise
    # the comma-separated epoch list from the command line is parsed.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {"learning_rate": lr,
                        "wd": wd,
                        "momentum": momentum,
                        "lr_scheduler": lr_scheduler}
    if dtype != "float32":
        # Keep a float32 master copy of the weights when training in reduced
        # precision.
        optimizer_params["multi_precision"] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info("Loading trainer states: {}".format(state_file_path))
        trainer.load_states(state_file_path)
        # NOTE(review): `trainer._optimizer` is a private attribute. The loaded
        # state may carry a stale weight decay and LR scheduler, so both are
        # overridden here with the freshly configured values.
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info("Reset the weight decay: {}".format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem,
                net,
                trainer):
    """
    Save current model/trainer parameters.
    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    """
    # Weights go to "<stem>.params", optimizer/trainer state to "<stem>.states".
    params_path = "{}.params".format(file_stem)
    states_path = "{}.states".format(file_stem)
    net.save_parameters(params_path)
    trainer.save_states(states_path)
def train_epoch(epoch,
                net,
                teacher_net,
                discrim_net,
                train_metric,
                loss_metrics,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                discrim_loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                mixup,
                mixup_epoch_tail,
                label_smoothing,
                num_classes,
                num_epochs,
                grad_clip_value,
                batch_size_scale):
    """
    Train model on particular epoch.
    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : HybridBlock
        Model.
    teacher_net : HybridBlock or None
        Teacher model.
    discrim_net : HybridBlock or None
        MEALv2 discriminator model.
    train_metric : EvalMetric
        Metric object instance.
    loss_metrics : list of EvalMetric
        Metric object instances (loss values).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    loss_func : Loss
        Loss function.
    discrim_loss_func : Loss or None
        MEALv2 adversarial loss function.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    num_epochs : int
        Number of training epochs.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    Returns:
    -------
    float
        Loss value.
    """
    # `labels_list_inds` keeps the integer class indices whenever the labels
    # are converted to one-hot/probability form, so the accuracy metric can
    # still be fed with sparse labels.
    labels_list_inds = None
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    train_metric.reset()
    for m in loss_metrics:
        m.reset()
    i = 0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        labels_one_hot = False
        if teacher_net is not None:
            # Knowledge distillation: soft labels are the teacher's class
            # probabilities, averaged over axis 1 (presumably the teacher
            # stacking axis from Concurrent(stack=True) — confirm in `main`).
            labels_list = [teacher_net(x.astype(dtype, copy=False)).softmax(axis=-1).mean(axis=1) for x in data_list]
            labels_list_inds = [y.argmax(axis=-1) for y in labels_list]
            labels_one_hot = True
        if label_smoothing and not (teacher_net is not None):
            # Standard label smoothing with eta = 0.1 (teacher labels are
            # already soft, so smoothing is skipped in that case).
            eta = 0.1
            on_value = 1 - eta + eta / num_classes
            off_value = eta / num_classes
            if not labels_one_hot:
                labels_list_inds = labels_list
            labels_list = [y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value)
                           for y in labels_list]
            labels_one_hot = True
        if mixup:
            if not labels_one_hot:
                labels_list_inds = labels_list
                labels_list = [y.one_hot(depth=num_classes) for y in labels_list]
                labels_one_hot = True
            # Mixup is disabled for the last `mixup_epoch_tail` epochs. Each
            # sample is mixed with its mirror in the reversed batch, using a
            # Beta(1, 1) (i.e. uniform) mixing coefficient.
            if epoch < num_epochs - mixup_epoch_tail:
                alpha = 1
                lam = np.random.beta(alpha, alpha)
                data_list = [lam * x + (1 - lam) * x[::-1] for x in data_list]
                labels_list = [lam * y + (1 - lam) * y[::-1] for y in labels_list]
        with ag.record():
            outputs_list = [net(x.astype(dtype, copy=False)) for x in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)]
            if discrim_net is not None:
                # MEALv2 adversarial term: the discriminator scores both the
                # student's softmax output and the (teacher) labels, and the
                # adversarial loss is added to the main loss.
                d_pred_list = [discrim_net(yhat.astype(dtype, copy=False).softmax()) for yhat in outputs_list]
                d_label_list = [discrim_net(y.astype(dtype, copy=False)) for y in labels_list]
                d_loss_list = [discrim_loss_func(yhat, y) for yhat, y in zip(d_pred_list, d_label_list)]
                loss_list = [z + dz for z, dz in zip(loss_list, d_loss_list)]
            for loss in loss_list:
                loss.backward()
        # Per-iteration LR update (scheduler resolves (batch, epoch) to a LR).
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # Gradient accumulation: gradients are summed over
            # `batch_size_scale` batches (grad_req == "add" is set by
            # train_net), then one optimizer step is taken and grads zeroed.
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_metric.update(
            labels=(labels_list if not labels_one_hot else labels_list_inds),
            preds=outputs_list)
        loss_metrics[0].update(labels=None, preds=loss_list)
        if (discrim_net is not None) and (len(loss_metrics) > 1):
            loss_metrics[1].update(labels=None, preds=d_loss_list)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            loss_accuracy_msg = report_accuracy(metric=loss_metrics[0])
            if (discrim_net is not None) and (len(loss_metrics) > 1):
                dloss_accuracy_msg = report_accuracy(metric=loss_metrics[1])
                logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\t{}\t{}\tlr={:.5f}".format(
                    epoch + 1, i, speed, train_accuracy_msg, loss_accuracy_msg, dloss_accuracy_msg,
                    trainer.learning_rate))
            else:
                logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\t{}\tlr={:.5f}".format(
                    epoch + 1, i, speed, train_accuracy_msg, loss_accuracy_msg, trainer.learning_rate))
    # Flush any gradients still accumulated when the epoch length is not a
    # multiple of batch_size_scale.
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_accuracy_msg = report_accuracy(metric=train_metric)
    loss_accuracy_msg = report_accuracy(metric=loss_metrics[0])
    if (discrim_net is not None) and (len(loss_metrics) > 1):
        dloss_accuracy_msg = report_accuracy(metric=loss_metrics[1])
        logging.info("[Epoch {}] training: {}\t{}\t{}".format(epoch + 1, train_accuracy_msg, loss_accuracy_msg,
                                                              dloss_accuracy_msg))
    else:
        logging.info("[Epoch {}] training: {}\t{}".format(epoch + 1, train_accuracy_msg, loss_accuracy_msg))
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              teacher_net,
              discrim_net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              mixup,
              mixup_epoch_tail,
              label_smoothing,
              num_classes,
              grad_clip_value,
              batch_size_scale,
              val_metric,
              train_metric,
              loss_metrics,
              loss_func,
              discrim_loss_func,
              ctx):
    """
    Main procedure for training model.
    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (training subset).
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (validation subset).
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    net : HybridBlock
        Model.
    teacher_net : HybridBlock or None
        Teacher model.
    discrim_net : HybridBlock or None
        MEALv2 discriminator model.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    loss_metrics : list of EvalMetric
        Metric object instances (loss values).
    loss_func : Loss
        Loss object instance.
    discrim_loss_func : Loss or None
        MEALv2 adversarial loss function.
    ctx : Context
        MXNet context.
    """
    # With a batch-size multiplier, gradients must accumulate across batches;
    # train_epoch zeroes them explicitly after each effective step.
    if batch_size_scale != 1:
        for p in net.collect_params().values():
            p.grad_req = "add"
    # Normalize a single context to a list so downstream code can index it.
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # loss_func = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing)))
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # When resuming, log a baseline validation score for the restored
        # weights before continuing training.
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_epoch(
            epoch=epoch,
            net=net,
            teacher_net=teacher_net,
            discrim_net=discrim_net,
            train_metric=train_metric,
            loss_metrics=loss_metrics,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            discrim_loss_func=discrim_loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            mixup=mixup,
            mixup_epoch_tail=mixup_epoch_tail,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
            num_epochs=num_epochs,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        if lp_saver is not None:
            # Checkpoint callback: parameter order must match the
            # `param_names` layout configured in `main`
            # (val metrics, train metrics, train loss, LR).
            lp_saver_kwargs = {"net": net, "trainer": trainer}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [loss_metrics[0].get()[1], trainer.learning_rate]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script: parse arguments, build the student (and optionally
    teacher/discriminator) networks, data sources, trainer and savers, then
    run the training loop.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Distillation is enabled iff a non-empty teacher model list was given.
    use_teacher = (args.teacher_models is not None) and (args.teacher_models.strip() != "")
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        initializer=get_initializer(initializer_name=args.initializer),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes
    teacher_net = None
    discrim_net = None
    discrim_loss_func = None
    if use_teacher:
        # Build each pretrained teacher and check it is shape-compatible with
        # the student.
        teacher_nets = []
        for teacher_model in args.teacher_models.split(","):
            teacher_net = prepare_model(
                model_name=teacher_model.strip(),
                use_pretrained=True,
                pretrained_model_file_path="",
                dtype=args.dtype,
                net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
                do_hybridize=(not args.not_hybridize),
                ctx=ctx)
            assert (teacher_net.classes == net.classes)
            assert (teacher_net.in_size == net.in_size)
            teacher_nets.append(teacher_net)
        if len(teacher_nets) > 0:
            # Stack the teachers into one module (stack=True adds a teacher
            # axis to the output) and freeze their parameters.
            teacher_net = Concurrent(stack=True, prefix="", branches=teacher_nets)
            for k, v in teacher_net.collect_params().items():
                v.grad_req = "null"
            if not args.not_discriminator:
                # MEALv2 discriminator trained jointly with the student; its
                # learning rate is scaled by `dlr_factor`.
                discrim_net = MealDiscriminator()
                discrim_net.cast(args.dtype)
                if not args.not_hybridize:
                    discrim_net.hybridize(
                        static_alloc=True,
                        static_shape=True)
                discrim_net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
                for k, v in discrim_net.collect_params().items():
                    v.lr_mult = args.dlr_factor
                discrim_loss_func = MealAdvLoss()
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    # ImageRecordIter sources do not expose a dataset length, so the sample
    # count comes from the metainfo in that case.
    num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        # `param_names` layout must match the values passed to
        # epoch_test_end_callback in train_net.
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None
    val_metric = get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs)
    train_metric = get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs)
    loss_metrics = [LossValue(name="loss"), LossValue(name="dloss")]
    # Sparse (integer) labels only apply when neither mixup, label smoothing,
    # nor a teacher produces soft labels.
    loss_kwargs = {"sparse_label": (not (args.mixup or args.label_smoothing) and
                                    not (use_teacher and (teacher_net is not None)))}
    if ds_metainfo.loss_extra_kwargs is not None:
        loss_kwargs.update(ds_metainfo.loss_extra_kwargs)
    loss_func = get_loss(ds_metainfo.loss_name, loss_kwargs)
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        dtype=args.dtype,
        net=net,
        teacher_net=teacher_net,
        discrim_net=discrim_net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        mixup=args.mixup,
        mixup_epoch_tail=args.mixup_epoch_tail,
        label_smoothing=args.label_smoothing,
        num_classes=num_classes,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        val_metric=val_metric,
        train_metric=train_metric,
        loss_metrics=loss_metrics,
        loss_func=loss_func,
        discrim_loss_func=discrim_loss_func,
        ctx=ctx)
# Script entry point.
if __name__ == "__main__":
    main()
| 33,553 | 32.188922 | 119 | py |
imgclsmob | imgclsmob-master/train_pt.py | """
Script for training model on PyTorch.
"""
import os
import time
import logging
import argparse
import random
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from pytorch.utils import prepare_pt_context, prepare_model, validate
from pytorch.utils import report_accuracy, get_composite_metric, get_metric_name
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser):
    """
    Create python script parameters (for training/classification specific subpart).
    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # Model selection and checkpoint resuming.
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    # Hardware and data-loading options.
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    # Batch / epoch schedule.
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    # Optimizer and learning-rate schedule.
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    # Regularization options.
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    # Augmentation / label transformation options.
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=15,
        help="number of epochs without mixup at the end of training")
    # Logging and checkpointing options.
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="Random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="torch, torchvision",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="",
        help="list of pip packages for logging")
    # Fine-tuning options.
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Parse python script parameters (common part).
    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    # Two-stage parsing: the dataset/work-dir pair is resolved first so that
    # the dataset metainfo and the training subpart can register their own
    # command-line options before the final parse.
    parser = argparse.ArgumentParser(
        description="Train a model for image classification (PyTorch)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    known_args, _ = parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=known_args.work_dir)
    add_train_cls_parser_arguments(parser)
    return parser.parse_args()
def init_rand(seed):
    """
    Initialize all random generators by seed.
    Parameters:
    ----------
    seed : int
        Seed value. Non-positive values request a randomly generated seed;
        positive values additionally enable deterministic cuDNN kernels.
    Returns:
    -------
    int
        Generated seed value (always positive, so it can be reproduced).
    """
    if seed <= 0:
        # Draw from [1, 10000) so the generated seed is never 0: a logged seed
        # of 0 would be treated as "generate randomly" on a re-run and the
        # experiment could not be reproduced.
        seed = np.random.randint(1, 10000)
    else:
        cudnn.deterministic = True
        logging.warning(
            "You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down "
            "your training considerably! You may see unexpected behavior when restarting from checkpoints.")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    num_epochs,
                    state_file_path):
    """
    Prepare trainer.
    Parameters:
    ----------
    net : Module
        Model.
    optimizer_name : str
        Name of optimizer ('sgd' or 'nag', case-insensitive).
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode ('step', 'multistep' or 'cosine').
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays (0 to disable).
    lr_decay_epoch : str
        Epoches at which learning rate decays (comma-separated).
    lr_decay : float
        Decay rate of learning rate.
    num_epochs : int
        Number of training epochs.
    state_file_path : str
        Path for file with trainer state.
    Returns:
    -------
    Optimizer
        Optimizer.
    LRScheduler
        Learning rate scheduler.
    int
        Start epoch (None when no resumable state was loaded).
    Raises:
    ------
    ValueError
        If the optimizer name or LR scheduler mode is not supported.
    """
    optimizer_name = optimizer_name.lower()
    if (optimizer_name == "sgd") or (optimizer_name == "nag"):
        # 'nag' is plain SGD with Nesterov momentum enabled.
        optimizer = torch.optim.SGD(
            params=net.parameters(),
            lr=lr,
            momentum=momentum,
            weight_decay=wd,
            nesterov=(optimizer_name == "nag"))
    else:
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))
    if state_file_path:
        # map_location="cpu" makes a checkpoint saved on GPU loadable on a
        # CPU-only host; load_state_dict moves the state to the params' device.
        checkpoint = torch.load(state_file_path, map_location="cpu")
        if isinstance(checkpoint, dict):
            optimizer.load_state_dict(checkpoint["optimizer"])
            start_epoch = checkpoint["epoch"]
        else:
            start_epoch = None
    else:
        start_epoch = None
    cudnn.benchmark = True
    lr_mode = lr_mode.lower()
    # A positive decay period generates evenly spaced decay epochs; otherwise
    # the comma-separated epoch list from the command line is parsed.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    if (lr_mode == "step") and (lr_decay_period != 0):
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer=optimizer,
            step_size=lr_decay_period,
            gamma=lr_decay,
            last_epoch=-1)
    elif (lr_mode == "multistep") or ((lr_mode == "step") and (lr_decay_period == 0)):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer,
            milestones=lr_decay_epoch,
            gamma=lr_decay,
            last_epoch=-1)
    elif lr_mode == "cosine":
        # Resuming with last_epoch != -1 requires "initial_lr" in each group.
        for group in optimizer.param_groups:
            group.setdefault("initial_lr", group["lr"])
        # NOTE(review): last_epoch=(num_epochs - 1) starts the cosine schedule
        # at its final phase — looks intentional for this repo's resume logic,
        # but confirm against the train_net stepping order.
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer,
            T_max=num_epochs,
            last_epoch=(num_epochs - 1))
    else:
        raise ValueError("Unsupported lr_scheduler: {}".format(lr_mode))
    return optimizer, lr_scheduler, start_epoch
def save_params(file_stem,
                state):
    """
    Save current model/trainer parameters.
    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    state : dict
        Whole state of model & trainer; must contain a "state_dict" entry
        with the model weights.
    """
    # Weights alone go to "<stem>.pth"; the full state (weights + optimizer +
    # epoch) goes to "<stem>.states" for resuming.
    torch.save(
        obj=state["state_dict"],
        f=(file_stem + ".pth"))
    torch.save(
        obj=state,
        f=(file_stem + ".states"))
def train_epoch(epoch,
                net,
                train_metric,
                train_data,
                use_cuda,
                L,
                optimizer,
                # lr_scheduler,
                batch_size,
                log_interval):
    """
    Train model on particular epoch.
    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : Module
        Model.
    train_metric : EvalMetric
        Metric object instance.
    train_data : DataLoader
        Data loader.
    use_cuda : bool
        Whether to use CUDA.
    L : Loss
        Loss function.
    optimizer : Optimizer
        Optimizer.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    Returns:
    -------
    float
        Loss value (averaged over batches; 0.0 for an empty loader).
    """
    tic = time.time()
    net.train()
    train_metric.reset()
    train_loss = 0.0
    # Initialize the batch index so the post-loop statistics are well-defined
    # even for an empty loader (matches the Gluon variant of this function).
    i = 0
    btic = time.time()
    for i, (data, target) in enumerate(train_data):
        if use_cuda:
            data = data.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        output = net(data)
        loss = L(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_metric.update(
            labels=target,
            preds=output)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
                epoch + 1, i, speed, train_accuracy_msg, optimizer.param_groups[0]["lr"]))
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    train_accuracy_msg = report_accuracy(metric=train_metric)
    logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
        epoch + 1, train_accuracy_msg, train_loss))
    return train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              net,
              optimizer,
              lr_scheduler,
              lp_saver,
              log_interval,
              num_classes,
              val_metric,
              train_metric,
              use_cuda):
    """
    Main procedure for training model.
    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader
        Data loader (training subset).
    val_data : DataLoader
        Data loader (validation subset).
    net : Module
        Model.
    optimizer : Optimizer
        Optimizer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    num_classes : int
        Number of model classes.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    use_cuda : bool
        Whether to use CUDA.
    """
    assert (num_classes > 0)
    L = nn.CrossEntropyLoss()
    if use_cuda:
        L = L.cuda()
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # When resuming, log a baseline validation score for the restored
        # weights before continuing training.
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        # NOTE(review): scheduler is stepped BEFORE the epoch is trained —
        # this is the pre-PyTorch-1.1 ordering (modern PyTorch expects
        # scheduler.step() after the optimizer updates and warns otherwise).
        # Confirm whether this interacts intentionally with the cosine
        # scheduler's last_epoch setting in prepare_trainer.
        lr_scheduler.step()
        train_loss = train_epoch(
            epoch=epoch,
            net=net,
            train_metric=train_metric,
            train_data=train_data,
            use_cuda=use_cuda,
            L=L,
            optimizer=optimizer,
            # lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        if lp_saver is not None:
            # Checkpoint callback: the params layout (val metrics, train
            # metrics, train loss, LR) must match the `param_names` list the
            # saver was configured with in `main`.
            state = {
                "epoch": epoch + 1,
                "state_dict": net.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            lp_saver_kwargs = {"state": state}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [train_loss, optimizer.param_groups[0]["lr"]]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script: parse CLI arguments, build the model, data sources
    and trainer, then run the full training loop.
    """
    args = parse_args()
    # init_rand returns the seed actually used (a fresh one is generated when
    # args.seed <= 0), so store it back for logging/reproducibility
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)
    # the model may be wrapped (e.g. in DataParallel); unwrap via .module to
    # reach model-level attributes such as num_classes
    real_net = net.module if hasattr(net, "module") else net
    assert (hasattr(real_net, "num_classes"))
    num_classes = real_net.num_classes
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        num_epochs=args.num_epochs,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        # column captions for the score log: validation metrics, then train
        # metrics, then training loss and learning rate
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".pth", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        # checkpointing disabled when no save dir / interval is configured
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        num_classes=num_classes,
        val_metric=get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs),
        train_metric=get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs),
        use_cuda=use_cuda)
if __name__ == "__main__":
    main()
| 20,958 | 28.519718 | 119 | py |
imgclsmob | imgclsmob-master/train_gl.py | """
Script for training model on MXNet/Gluon.
"""
import argparse
import time
import logging
import os
import random
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model, validate
from gluon.utils import report_accuracy, get_composite_metric, get_metric_name, get_initializer, get_loss
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_train_data_source, get_val_data_source
from gluon.dataset_utils import get_batch_fn
def add_train_cls_parser_arguments(parser):
    """
    Create python script parameters (for training/classification specific subpart).
    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type for training")
    parser.add_argument(
        "--not-hybridize",
        action="store_true",
        help="do not hybridize model")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    parser.add_argument(
        "--initializer",
        type=str,
        default="MSRAPrelu",
        help="initializer name. options are MSRAPrelu, Xavier and Xavier-gaussian-out-2")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=12,
        help="number of epochs without mixup at the end of training")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="mxnet-cu110, mxnet-cu112",
        help="list of pip packages for logging")
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Parse python script parameters (common part).
    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    cli_parser = argparse.ArgumentParser(
        description="Train a model for image classification (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, LibriSpeech,"
             " MCV")
    cli_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # first (partial) pass: only the dataset/work-dir options are needed to
    # select the dataset-specific argument set
    pre_args, _ = cli_parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=pre_args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=cli_parser,
        work_dir_path=pre_args.work_dir)
    # then register the training-specific options and parse everything
    add_train_cls_parser_arguments(cli_parser)
    return cli_parser.parse_args()
def init_rand(seed):
    """
    Initialize all random generators by seed.
    Parameters:
    ----------
    seed : int
        Seed value.
    Returns:
    -------
    int
        Generated seed value.
    """
    # a non-positive seed means "pick one at random"
    if seed <= 0:
        seed = np.random.randint(10000)
    for seed_setter in (random.seed, np.random.seed, mx.random.seed):
        seed_setter(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Prepare trainer.
    Parameters:
    ----------
    net : HybridBlock
        Model.
    optimizer_name : str
        Name of optimizer.
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode.
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays.
    lr_decay_epoch : str
        Epoches at which learning rate decays.
    lr_decay : float
        Decay rate of learning rate.
    target_lr : float
        Final learning rate.
    poly_power : float
        Power value for poly LR scheduler.
    warmup_epochs : int
        Number of warmup epochs.
    warmup_lr : float
        Starting warmup learning rate.
    warmup_mode : str
        Learning rate scheduler warmup mode.
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    num_training_samples : int
        Number of training samples in dataset.
    dtype : str
        Base data type for tensors.
    gamma_wd_mult : float
        Weight decay multiplier for batchnorm gamma.
    beta_wd_mult : float
        Weight decay multiplier for batchnorm beta.
    bias_wd_mult : float
        Weight decay multiplier for bias.
    state_file_path : str, default None
        Path for file with trainer state.
    Returns:
    -------
    Trainer
        Trainer.
    LRScheduler
        Learning rate scheduler.
    """
    # apply per-parameter weight-decay multipliers; parameters are selected by
    # a regexp on their names (batchnorm gamma/beta, biases)
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params(".*gamma").items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params(".*beta").items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params(".*bias").items():
            v.wd_mult = bias_wd_mult
    # a positive decay period generates a uniform decay schedule; otherwise the
    # explicit comma-separated epoch list is parsed
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    # the scheduler works per-iteration, so it needs batches-per-epoch
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {"learning_rate": lr,
                        "wd": wd,
                        "momentum": momentum,
                        "lr_scheduler": lr_scheduler}
    if dtype != "float32":
        # keep a float32 master copy of the weights when training in reduced
        # precision (e.g. float16)
        optimizer_params["multi_precision"] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    # when resuming, restore the saved optimizer state but re-impose the current
    # weight decay and LR scheduler, since the stored state may carry stale ones
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info("Loading trainer states: {}".format(state_file_path))
        trainer.load_states(state_file_path)
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info("Reset the weight decay: {}".format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem, net, trainer):
    """
    Save current model/trainer parameters.
    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    """
    # two sibling files are written next to each other: weights and optimizer state
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                train_metric,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                mixup,
                mixup_epoch_tail,
                label_smoothing,
                num_classes,
                num_epochs,
                grad_clip_value,
                batch_size_scale):
    """
    Train model on particular epoch.
    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : HybridBlock
        Model.
    train_metric : EvalMetric
        Metric object instance.
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    loss_func : Loss
        Loss function.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    num_epochs : int
        Number of training epochs.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    Returns:
    -------
    float
        Loss value.
    """
    # keeps the original integer labels when labels get converted to one-hot,
    # so the metric can still be updated with class indices
    labels_list_inds = None
    # number of batches accumulated since the last trainer.step (only used when
    # batch_size_scale != 1, i.e. gradient accumulation)
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    train_metric.reset()
    train_loss = 0.0
    i = 0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        if label_smoothing:
            # replace hard labels by a smoothed one-hot distribution
            eta = 0.1
            on_value = 1 - eta + eta / num_classes
            off_value = eta / num_classes
            labels_list_inds = labels_list
            labels_list = [Y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value) for Y in labels_list]
        if mixup:
            if not label_smoothing:
                # mixup needs one-hot labels; convert them if label smoothing
                # has not already done so
                labels_list_inds = labels_list
                labels_list = [Y.one_hot(depth=num_classes) for Y in labels_list]
            # mixup is disabled for the last mixup_epoch_tail epochs
            if epoch < num_epochs - mixup_epoch_tail:
                alpha = 1
                lam = np.random.beta(alpha, alpha)
                # blend each batch with its reversed copy (inputs and labels alike)
                data_list = [lam * X + (1 - lam) * X[::-1] for X in data_list]
                labels_list = [lam * Y + (1 - lam) * Y[::-1] for Y in labels_list]
        with ag.record():
            outputs_list = [net(X.astype(dtype, copy=False)) for X in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)]
        for loss in loss_list:
            loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # gradient accumulation: only step every batch_size_scale batches
            # (grad_req is expected to be "add" -- see train_net), then zero the
            # accumulated gradients manually
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list)
        # with mixup/label-smoothing the metric is fed the original integer labels
        train_metric.update(
            labels=(labels_list if not (mixup or label_smoothing) else labels_list_inds),
            preds=outputs_list)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
                epoch + 1, i, speed, train_accuracy_msg, trainer.learning_rate))
    # flush any gradients still accumulated when the epoch ends between steps
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    train_accuracy_msg = report_accuracy(metric=train_metric)
    logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
        epoch + 1, train_accuracy_msg, train_loss))
    return train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              mixup,
              mixup_epoch_tail,
              label_smoothing,
              num_classes,
              grad_clip_value,
              batch_size_scale,
              val_metric,
              train_metric,
              loss_func,
              ctx):
    """
    Main procedure for training model.
    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (training subset).
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (validation subset).
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    loss_func : Loss
        Loss object instance.
    ctx : Context
        MXNet context.
    """
    if batch_size_scale != 1:
        # gradient accumulation mode: gradients must be summed across batches
        # instead of overwritten (train_epoch zeroes them after each step)
        for p in net.collect_params().values():
            p.grad_req = "add"
    # normalize a bare context into a list so downstream code is uniform
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # loss_func = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing)))
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # when resuming, report validation accuracy of the restored model first
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_loss = train_epoch(
            epoch=epoch,
            net=net,
            train_metric=train_metric,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            mixup=mixup,
            mixup_epoch_tail=mixup_epoch_tail,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
            num_epochs=num_epochs,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        if lp_saver is not None:
            # parameter order must match the param_names captions built in main
            lp_saver_kwargs = {"net": net, "trainer": trainer}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [train_loss, trainer.learning_rate]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script: parse CLI arguments, build the model, data sources
    and trainer, then run the full training loop.
    """
    args = parse_args()
    # init_rand returns the seed actually used (a fresh one is generated when
    # args.seed <= 0), so store it back for logging/reproducibility
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        initializer=get_initializer(initializer_name=args.initializer),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    # ImageRecordIter sources don't expose a dataset length, so take the sample
    # count from the dataset metainfo in that case
    num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        # column captions for the score log: validation metrics, then train
        # metrics, then training loss and learning rate
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        # checkpointing disabled when no save dir / interval is configured
        lp_saver = None
    val_metric = get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs)
    train_metric = get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs)
    # mixup/label-smoothing produce soft (one-hot) labels, hence sparse_label=False
    loss_kwargs = {"sparse_label": not (args.mixup or args.label_smoothing)}
    if ds_metainfo.loss_extra_kwargs is not None:
        loss_kwargs.update(ds_metainfo.loss_extra_kwargs)
    loss_func = get_loss(ds_metainfo.loss_name, loss_kwargs)
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        dtype=args.dtype,
        net=net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        mixup=args.mixup,
        mixup_epoch_tail=args.mixup_epoch_tail,
        label_smoothing=args.label_smoothing,
        num_classes=num_classes,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        val_metric=val_metric,
        train_metric=train_metric,
        loss_func=loss_func,
        ctx=ctx)
if __name__ == "__main__":
    main()
| 28,277 | 30.489978 | 119 | py |
imgclsmob | imgclsmob-master/chainer_/chainercv2/models/quartznet.py | """
QuartzNet for ASR, implemented in Chainer.
Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
https://arxiv.org/abs/1910.10261.
"""
__all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de',
'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru',
'quartznet15x5_ru34']
from .jasper import get_jasper
def quartznet5x5_en_ls(classes=29, **kwargs):
    """
    QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, the 26 lowercase Latin letters and the apostrophe
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(
        classes=classes,
        version=("quartznet", "5x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet5x5_en_ls",
        **kwargs)
def quartznet15x5_en(classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, the 26 lowercase Latin letters and the apostrophe
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_en",
        **kwargs)
def quartznet15x5_en_nr(classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, the 26 lowercase Latin letters and the apostrophe
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_en_nr",
        **kwargs)
def quartznet15x5_fr(classes=43, **kwargs):
    """
    QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 43
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, a-z, apostrophe, then the French accented letters
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'çéâêîôûàèùëïüÿ")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_fr",
        **kwargs)
def quartznet15x5_de(classes=32, **kwargs):
    """
    QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 32
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, a-z, then the German umlauts and eszett (no apostrophe)
    vocabulary = list(" abcdefghijklmnopqrstuvwxyzäöüß")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_de",
        **kwargs)
def quartznet15x5_it(classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, a-z, apostrophe, then the Italian accented letters
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'àéèíìîóòúù")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_it",
        **kwargs)
def quartznet15x5_es(classes=36, **kwargs):
    """
    QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 36
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, a-z, apostrophe, then the Spanish accented letters
    vocabulary = list(" abcdefghijklmnopqrstuvwxyz'áéíóúñü")
    return get_jasper(
        classes=classes,
        version=("quartznet", "15x5"),
        use_dw=True,
        vocabulary=vocabulary,
        model_name="quartznet15x5_es",
        **kwargs)
def quartznet15x5_ca(classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Catalan language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # grapheme set: space, a-z, apostrophe, then the Catalan accented letters
    # (including the interpunct 'l' -- 'ŀ')
    vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                  't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ca", **kwargs)
def quartznet15x5_pl(classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Polish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Polish grapheme set (space plus the Polish alphabet; q/v/x are absent from native Polish words).
    vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
                  'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_pl", **kwargs)
def quartznet15x5_ru(classes=35, **kwargs):
    """
    QuartzNet 15x5 ASR model for the Russian language, from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 35
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Full 33-letter Russian alphabet (including 'ё'), preceded by the space character.
    ru_graphemes = " абвгдеёжзийклмнопрстуфхцчшщъыьэюя"
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=list(ru_graphemes), model_name="quartznet15x5_ru", **kwargs)
def quartznet15x5_ru34(classes=34, **kwargs):
    """
    QuartzNet 15x5 ASR model for the Russian language with a reduced 32-grapheme alphabet, from 'QuartzNet: Deep
    Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Reduced Russian alphabet without 'ё' (32 letters), preceded by the space character.
    ru34_graphemes = " абвгдежзийклмнопрстуфхцчшщъыьэюя"
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=list(ru34_graphemes), model_name="quartznet15x5_ru34", **kwargs)
def _test():
    """
    Smoke test: instantiate every QuartzNet variant, verify its parameter count, and check the output shape
    on a random batch.
    """
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False
    from_audio = True
    audio_features = 64

    # Expected parameter counts for each model variant.
    expected_weight_counts = {
        quartznet5x5_en_ls: 6713181,
        quartznet15x5_en: 18924381,
        quartznet15x5_en_nr: 18924381,
        quartznet15x5_fr: 18938731,
        quartznet15x5_de: 18927456,
        quartznet15x5_it: 18934631,
        quartznet15x5_es: 18931556,
        quartznet15x5_ca: 18934631,
        quartznet15x5_pl: 18929506,
        quartznet15x5_ru: 18930531,
        quartznet15x5_ru34: 18929506,
    }

    for model in expected_weight_counts:
        net = model(
            in_channels=audio_features,
            from_audio=from_audio,
            pretrained=pretrained)

        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])

        batch = 3
        # When feeding raw audio, the sequence length is in samples (640 samples per feature frame here).
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
        x = np.random.rand(*x_shape).astype(np.float32)
        # NOTE: `np.long` was removed in NumPy 1.24; use the explicit 64-bit integer type instead.
        x_len = seq_len.astype(np.int64)

        y, y_len = net(x, x_len)
        assert (y.shape[:2] == (batch, net.classes))
        if from_audio:
            assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1])


if __name__ == "__main__":
    _test()
| 13,081 | 42.899329 | 119 | py |
imgclsmob | imgclsmob-master/chainer_/metrics/det_metrics.py | """
Evaluation Metrics for Object Detection.
"""
import warnings
import numpy as np
import mxnet as mx
__all__ = ['CocoDetMApMetric']
class CocoDetMApMetric(mx.metric.EvalMetric):
    """
    Detection metric for COCO bbox task.
    Parameters:
    ----------
    img_height : int
        Processed image height.
    coco_annotations_file_path : str
        COCO annotation file path.
    contiguous_id_to_json : list of int
        Mapping from contiguous (network) class IDs to COCO JSON category IDs.
    validation_ids : list of int, default None
        IDs of the images to evaluate on (all annotated images when None).
    use_file : bool, default False
        Whether to use temporary file for estimation.
    score_thresh : float, default 0.05
        Detection results with confident scores smaller than `score_thresh` will be discarded before saving to
        results.
    data_shape : tuple of int, default is None
        If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions.
        This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must
        be fixed for all validation images.
    post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h)
        If not None, the bounding boxes will be affine transformed rather than simply scaled.
    name : str, default 'mAP'
        Name of this metric instance for display.
    """
    def __init__(self,
                 img_height,
                 coco_annotations_file_path,
                 contiguous_id_to_json,
                 validation_ids=None,
                 use_file=False,
                 score_thresh=0.05,
                 data_shape=None,
                 post_affine=None,
                 name="mAP"):
        super(CocoDetMApMetric, self).__init__(name=name)

        self.img_height = img_height
        self.coco_annotations_file_path = coco_annotations_file_path
        self.contiguous_id_to_json = contiguous_id_to_json
        self.validation_ids = validation_ids
        self.use_file = use_file
        self.score_thresh = score_thresh

        self.current_idx = 0
        self.coco_result = []

        if isinstance(data_shape, (tuple, list)):
            assert len(data_shape) == 2, "Data shape must be (height, width)"
        elif not data_shape:
            data_shape = None
        else:
            raise ValueError("data_shape must be None or tuple of int as (height, width)")
        self._data_shape = data_shape

        if post_affine is not None:
            assert self._data_shape is not None, "Using post affine transform requires data_shape"
            self._post_affine = post_affine
        else:
            self._post_affine = None

        from pycocotools.coco import COCO
        self.gt = COCO(self.coco_annotations_file_path)
        # Sorted image IDs define the order in which predictions are expected to arrive.
        self._img_ids = sorted(self.gt.getImgIds())

    def reset(self):
        """
        Reset the internal prediction buffer and the image cursor.
        """
        self.current_idx = 0
        self.coco_result = []

    def get(self):
        """
        Get evaluation metrics.
        Returns:
        -------
        str
            Metric name.
        tuple of float
            First three COCOeval stats: AP@[.5:.95], AP@.5, AP@.75.
        """
        if self.current_idx != len(self._img_ids):
            warnings.warn("Recorded {} out of {} validation images, incomplete results".format(
                self.current_idx, len(self._img_ids)))

        from pycocotools.coco import COCO
        gt = COCO(self.coco_annotations_file_path)

        import tempfile
        import json
        # pycocotools' loadRes accepts a file path, so dump the accumulated detections to a temporary JSON file.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
            json.dump(self.coco_result, f)
            f.flush()
            pred = gt.loadRes(f.name)

        from pycocotools.cocoeval import COCOeval
        coco_eval = COCOeval(gt, pred, "bbox")
        if self.validation_ids is not None:
            coco_eval.params.imgIds = self.validation_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        return self.name, tuple(coco_eval.stats[:3])

    def update2(self,
                pred_bboxes,
                pred_labels,
                pred_scores):
        """
        Update internal buffer with latest predictions. Note that the statistics are not available until you call
        self.get() to return the metrics.
        Parameters:
        ----------
        pred_bboxes : mxnet.NDArray or numpy.ndarray
            Prediction bounding boxes with shape `B, N, 4`.
            Where B is the size of mini-batch, N is the number of bboxes.
        pred_labels : mxnet.NDArray or numpy.ndarray
            Prediction bounding boxes labels with shape `B, N`.
        pred_scores : mxnet.NDArray or numpy.ndarray
            Prediction bounding boxes scores with shape `B, N`.
        """
        def as_numpy(a):
            """
            Convert a (list of) mx.NDArray into numpy.ndarray
            """
            if isinstance(a, (list, tuple)):
                out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
                return np.concatenate(out, axis=0)
            elif isinstance(a, mx.nd.NDArray):
                a = a.asnumpy()
            return a

        for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]):
            # Negative labels mark padded (invalid) detections.
            valid_pred = np.where(pred_label.flat >= 0)[0]
            # NOTE: `np.float` was removed in NumPy 1.24; the builtin `float` is the equivalent float64 dtype.
            pred_bbox = pred_bbox[valid_pred, :].astype(float)
            pred_label = pred_label.flat[valid_pred].astype(int)
            pred_score = pred_score.flat[valid_pred].astype(float)

            imgid = self._img_ids[self.current_idx]
            self.current_idx += 1
            affine_mat = None
            if self._data_shape is not None:
                entry = self.gt.loadImgs(imgid)[0]
                orig_height = entry["height"]
                orig_width = entry["width"]
                height_scale = float(orig_height) / self._data_shape[0]
                width_scale = float(orig_width) / self._data_shape[1]
                if self._post_affine is not None:
                    affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0])
            else:
                height_scale, width_scale = (1.0, 1.0)
            # for each bbox detection in each image
            for bbox, label, score in zip(pred_bbox, pred_label, pred_score):
                if label not in self.contiguous_id_to_json:
                    # ignore non-exist class
                    continue
                if score < self.score_thresh:
                    continue
                category_id = self.contiguous_id_to_json[label]
                # rescale bboxes/affine transform bboxes
                if affine_mat is not None:
                    bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat)
                    bbox[2:4] = self.affine_transform(bbox[2:4], affine_mat)
                else:
                    bbox[[0, 2]] *= width_scale
                    bbox[[1, 3]] *= height_scale
                # convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]
                bbox[2:4] -= (bbox[:2] - 1)
                self.coco_result.append({"image_id": imgid,
                                         "category_id": category_id,
                                         "bbox": bbox[:4].tolist(),
                                         "score": score})

    def update(self, labels, preds):
        """
        Split raw network outputs (bbox/id/score packed along the last axis) and feed them to `update2`.
        Parameters:
        ----------
        labels : list of mxnet.NDArray
            Ground-truth records (unused here; kept for the EvalMetric interface).
        preds : list of mxnet.NDArray
            Network outputs with shape `B, N, 6` packed as [x0, y0, x1, y1, class_id, score].
        """
        det_bboxes = []
        det_ids = []
        det_scores = []
        for x_rr, y in zip(preds, labels):
            bboxes = x_rr.slice_axis(axis=-1, begin=0, end=4)
            ids = x_rr.slice_axis(axis=-1, begin=4, end=5).squeeze(axis=2)
            scores = x_rr.slice_axis(axis=-1, begin=5, end=6).squeeze(axis=2)
            det_ids.append(ids)
            det_scores.append(scores)
            # clip to image size
            det_bboxes.append(bboxes.clip(0, self.img_height))
        self.update2(det_bboxes, det_ids, det_scores)

    @staticmethod
    def affine_transform(pt, t):
        """
        Apply affine transform to a bounding box given transform matrix t.
        Parameters:
        ----------
        pt : numpy.ndarray
            Bounding box with shape (1, 2).
        t : numpy.ndarray
            Transformation matrix with shape (2, 3).
        Returns:
        -------
        numpy.ndarray
            New bounding box with shape (1, 2).
        """
        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
        new_pt = np.dot(t, new_pt)
        return new_pt[:2]
| 8,392 | 38.219626 | 119 | py |
imgclsmob | imgclsmob-master/chainer_/datasets/coco_hpe2_dataset.py | """
COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose).
"""
import os
import json
import math
import cv2
from operator import itemgetter
import numpy as np
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe2Dataset(GetterDataset):
    """
    COCO keypoint detection (2D multiple human pose estimation) dataset.
    Parameters:
    ----------
    root : string
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode="train",
                 transform=None):
        super(CocoHpe2Dataset, self).__init__()
        self._root = os.path.expanduser(root)
        self.mode = mode
        self.transform = transform
        # Any mode other than "train" reads the validation split.
        mode_name = "train" if mode == "train" else "val"
        annotations_dir_path = os.path.join(root, "annotations")
        annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
        # Only the image list is read here; keypoint annotations are consumed later via pycocotools.
        with open(annotations_file_path, "r") as f:
            self.file_names = json.load(f)["images"]
        self.image_dir_path = os.path.join(root, mode_name + "2017")
        self.annotations_file_path = annotations_file_path
    def __str__(self):
        return self.__class__.__name__ + "(" + self._root + ")"
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        """
        Load and preprocess one sample.
        Returns a CHW float32 image (normalized, resized to height 368 and padded to a multiple of the stride)
        and a float32 label vector [image_id, scale, pad_top, pad_left, pad_bottom, pad_right, height, width]
        used to undo the preprocessing during post-processing.
        """
        file_name = self.file_names[idx]["file_name"]
        image_file_path = os.path.join(self.image_dir_path, file_name)
        image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
        # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
        # Preprocessing constants: center pixels around 0 and scale roughly into [-0.5, 0.5].
        img_mean = (128, 128, 128)
        img_scale = 1.0 / 256
        base_height = 368
        stride = 8
        pad_value = (0, 0, 0)
        height, width, _ = image.shape
        image = self.normalize(image, img_mean, img_scale)
        # Scale so the image height matches the network input height.
        ratio = base_height / float(image.shape[0])
        image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
        min_dims = [base_height, max(image.shape[1], base_height)]
        # Pad symmetrically so both dimensions become multiples of the network stride.
        image, pad = self.pad_width(
            image,
            stride,
            pad_value,
            min_dims)
        image = image.astype(np.float32)
        # HWC -> CHW for the network.
        image = image.transpose((2, 0, 1))
        # image = torch.from_numpy(image)
        # if self.transform is not None:
        #     image = self.transform(image)
        # COCO file names encode the numeric image ID.
        image_id = int(os.path.splitext(os.path.basename(file_name))[0])
        label = np.array([image_id, 1.0] + pad + [height, width], np.float32)
        # label = torch.from_numpy(label)
        return image, label
    def _get_image(self, idx):
        image, label = self[idx]
        return image
    def _get_label(self, idx):
        image, label = self[idx]
        return label
    @staticmethod
    def normalize(img,
                  img_mean,
                  img_scale):
        """
        Subtract the per-channel mean and scale pixel values.
        """
        img = np.array(img, dtype=np.float32)
        img = (img - img_mean) * img_scale
        return img
    @staticmethod
    def pad_width(img,
                  stride,
                  pad_value,
                  min_dims):
        """
        Pad `img` with a constant border so each dimension is at least `min_dims` rounded up to a multiple of
        `stride`. Returns the padded image and the applied [top, left, bottom, right] padding.
        """
        h, w, _ = img.shape
        h = min(min_dims[0], h)
        min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
        min_dims[1] = max(min_dims[1], w)
        min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
        # Split the required padding evenly between the two sides (extra pixel goes to bottom/right).
        top = int(math.floor((min_dims[0] - h) / 2.0))
        left = int(math.floor((min_dims[1] - w) / 2.0))
        bottom = int(min_dims[0] - h - top)
        right = int(min_dims[1] - w - left)
        pad = [top, left, bottom, right]
        padded_img = cv2.copyMakeBorder(
            src=img,
            top=top,
            bottom=bottom,
            left=left,
            right=right,
            borderType=cv2.BORDER_CONSTANT,
            value=pad_value)
        return padded_img, pad
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
    """
    Identity transform for COCO HPE validation samples: passes the (image, label) pair through unchanged.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo (stored only to satisfy the common transform interface).
    """
    def __init__(self, ds_metainfo):
        self.ds_metainfo = ds_metainfo

    def __call__(self, src, label):
        return src, label
def extract_keypoints(heatmap,
                      all_keypoints,
                      total_keypoint_num):
    """
    Detect local maxima ("peaks") of a single-joint confidence heatmap and append them to `all_keypoints`.
    The heatmap is thresholded in place at 0.1; peaks closer than 6 px to a stronger (earlier) peak are
    suppressed. Each accepted peak is recorded as (x, y, score, global_id).
    Parameters:
    ----------
    heatmap : numpy.ndarray
        2D confidence map for one joint type (modified in place).
    all_keypoints : list
        Accumulator; one list of peak tuples is appended per call.
    total_keypoint_num : int
        Number of keypoints found so far (used to assign consecutive global IDs).
    Returns:
    -------
    int
        Number of peaks found for this joint type.
    """
    # Zero-out low-confidence responses (in place, by design).
    heatmap[heatmap < 0.1] = 0
    padded = np.pad(heatmap, [(2, 2), (2, 2)], mode="constant")
    ph, pw = padded.shape
    # Shifted views used for a 4-neighbour strict local-maximum test.
    center = padded[1:ph - 1, 1:pw - 1]
    shift_left = padded[1:ph - 1, 2:pw]
    shift_right = padded[1:ph - 1, 0:pw - 2]
    shift_up = padded[2:ph, 1:pw - 1]
    shift_down = padded[0:ph - 2, 1:pw - 1]
    peak_mask = (center > shift_left) & (center > shift_right) & (center > shift_up) & (center > shift_down)
    # Crop back to the original heatmap coordinates.
    peak_mask = peak_mask[1:center.shape[0] - 1, 1:center.shape[1] - 1]
    rows, cols = np.nonzero(peak_mask)
    peaks = sorted(zip(cols, rows), key=itemgetter(0))  # (x, y) pairs, left to right
    suppressed = np.zeros(len(peaks), np.uint8)
    found = []
    for i, (x_i, y_i) in enumerate(peaks):
        if suppressed[i]:
            continue
        # Greedy non-maximum suppression: drop any later peak within 6 px of this one.
        for j in range(i + 1, len(peaks)):
            if math.sqrt((x_i - peaks[j][0]) ** 2 + (y_i - peaks[j][1]) ** 2) < 6:
                suppressed[j] = 1
        found.append((x_i, y_i, heatmap[y_i, x_i], total_keypoint_num + len(found)))
    all_keypoints.append(found)
    return len(found)
def group_keypoints(all_keypoints_by_type,
                    pafs,
                    pose_entry_size=20,
                    min_paf_score=0.05):
    """
    Assemble individual keypoints into per-person pose entries using Part Affinity Fields (PAFs).
    Parameters:
    ----------
    all_keypoints_by_type : list of list of tuple
        For each of the 18 joint types, a list of (x, y, score, global_id) peaks.
    pafs : numpy.ndarray
        PAF map of shape (H, W, 38): an (x, y) vector field per limb.
    pose_entry_size : int, default 20
        Length of one pose entry: 18 joint slots + pose score + keypoint count.
    min_paf_score : float, default 0.05
        Minimum per-sample PAF alignment score for a point to count towards a connection.
    Returns:
    -------
    numpy.ndarray
        Filtered pose entries (joint slots hold global keypoint IDs, -1 when absent).
    numpy.ndarray
        Flat array of all keypoints, indexed by global ID.
    """
    def linspace2d(start, stop, n=10):
        # n evenly spaced 2D points from `start` to `stop` (inclusive); rows are x and y coordinates.
        points = 1 / (n - 1) * (stop - start)
        return points[:, None] * np.arange(n) + start[:, None]
    # Limb endpoints as joint-type indices, and the PAF channel pair carrying each limb's vector field.
    BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
                          [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
    BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
                          [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19],
                          [26, 27])
    pose_entries = []
    all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
    for part_id in range(len(BODY_PARTS_PAF_IDS)):
        part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
        kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
        kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
        num_kpts_a = len(kpts_a)
        num_kpts_b = len(kpts_b)
        kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
        kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
        if num_kpts_a == 0 and num_kpts_b == 0:  # no keypoints for such body part
            continue
        elif num_kpts_a == 0:  # body part has just 'b' keypoints
            for i in range(num_kpts_b):
                num = 0
                for j in range(len(pose_entries)):  # check if already in some pose, was added by another body part
                    if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
                        num += 1
                        continue
                if num == 0:
                    # Start a new single-keypoint pose entry.
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_b_id] = kpts_b[i][3]  # keypoint idx
                    pose_entry[-1] = 1  # num keypoints in pose
                    pose_entry[-2] = kpts_b[i][2]  # pose score
                    pose_entries.append(pose_entry)
            continue
        elif num_kpts_b == 0:  # body part has just 'a' keypoints
            for i in range(num_kpts_a):
                num = 0
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
                        num += 1
                        continue
                if num == 0:
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_a_id] = kpts_a[i][3]
                    pose_entry[-1] = 1
                    pose_entry[-2] = kpts_a[i][2]
                    pose_entries.append(pose_entry)
            continue
        # Score every candidate a-b pair by integrating PAF alignment along the segment between them.
        connections = []
        for i in range(num_kpts_a):
            kpt_a = np.array(kpts_a[i][0:2])
            for j in range(num_kpts_b):
                kpt_b = np.array(kpts_b[j][0:2])
                mid_point = [(), ()]
                mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
                                int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
                mid_point[1] = mid_point[0]
                vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
                vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
                if vec_norm == 0:
                    continue
                vec[0] /= vec_norm
                vec[1] /= vec_norm
                # Quick alignment check at the segment midpoint (dot product of unit limb vector and PAF).
                cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
                                   vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
                height_n = pafs.shape[0] // 2
                success_ratio = 0
                point_num = 10  # number of points to integration over paf
                # NOTE(review): this threshold is effectively always true, so `ratio` below is always bound.
                if cur_point_score > -100:
                    passed_point_score = 0
                    passed_point_num = 0
                    x, y = linspace2d(kpt_a, kpt_b)
                    for point_idx in range(point_num):
                        px = int(round(x[point_idx]))
                        py = int(round(y[point_idx]))
                        paf = part_pafs[py, px, 0:2]
                        cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
                        if cur_point_score > min_paf_score:
                            passed_point_score += cur_point_score
                            passed_point_num += 1
                    success_ratio = passed_point_num / point_num
                    ratio = 0
                    if passed_point_num > 0:
                        ratio = passed_point_score / passed_point_num
                    # Penalize limbs that are unreasonably long relative to the image height.
                    ratio += min(height_n / vec_norm - 1, 0)
                if ratio > 0 and success_ratio > 0.8:
                    score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
                    connections.append([i, j, ratio, score_all])
        if len(connections) > 0:
            connections = sorted(connections, key=itemgetter(2), reverse=True)
        # Greedily keep the best-scoring connections so each keypoint is used at most once.
        num_connections = min(num_kpts_a, num_kpts_b)
        has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
        has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
        filtered_connections = []
        for row in range(len(connections)):
            if len(filtered_connections) == num_connections:
                break
            i, j, cur_point_score = connections[row][0:3]
            if not has_kpt_a[i] and not has_kpt_b[j]:
                filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
                has_kpt_a[i] = 1
                has_kpt_b[j] = 1
        connections = filtered_connections
        if len(connections) == 0:
            continue
        if part_id == 0:
            # First limb: each connection seeds a new pose entry.
            pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
            for i in range(len(connections)):
                pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
                pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
                pose_entries[i][-1] = 2
                pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
        elif part_id == 17 or part_id == 18:
            # Last two limbs (ear-shoulder) only fill missing slots in existing poses; they add no score.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                    elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
                        pose_entries[j][kpt_a_id] = connections[i][0]
            continue
        else:
            # Attach the 'b' endpoint to any pose that already owns the 'a' endpoint; otherwise start a new pose.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                num = 0
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0]:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                        num += 1
                        pose_entries[j][-1] += 1
                        pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
                if num == 0:
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_a_id] = connections[i][0]
                    pose_entry[kpt_b_id] = connections[i][1]
                    pose_entry[-1] = 2
                    pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
                    pose_entries.append(pose_entry)
    # Drop weak poses: fewer than 3 keypoints or low average per-keypoint score.
    filtered_entries = []
    for i in range(len(pose_entries)):
        if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
            continue
        filtered_entries.append(pose_entries[i])
    pose_entries = np.asarray(filtered_entries)
    return pose_entries, all_keypoints
def convert_to_coco_format(pose_entries, all_keypoints):
    """
    Convert internal 18-joint pose entries into the COCO 17-keypoint format.
    Parameters:
    ----------
    pose_entries : sequence of numpy.ndarray
        Pose entries; slots [0:18] hold global keypoint IDs (-1 when absent), [-2] the pose score and
        [-1] the keypoint count.
    all_keypoints : numpy.ndarray
        Flat (N, 4) keypoint array of (x, y, score, id) rows, indexed by global ID.
    Returns:
    -------
    list of list of float
        Per-person flat [x, y, visibility] * 17 keypoint lists.
    list of float
        Per-person confidence scores.
    """
    # Internal joint order -> COCO keypoint index; the internal 'neck' (position 1) has no COCO counterpart.
    to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
    coco_keypoints = []
    scores = []
    for entry in pose_entries:
        if len(entry) == 0:
            continue
        kpts = [0] * (17 * 3)
        person_score = entry[-2]
        for position_id, keypoint_id in enumerate(entry[:-2]):
            if position_id == 1:  # skip 'neck'
                continue
            cx, cy, visibility = 0, 0, 0  # defaults for a missing keypoint
            if keypoint_id != -1:
                # Shift by half a pixel to the keypoint's center.
                cx = all_keypoints[int(keypoint_id), 0] + 0.5
                cy = all_keypoints[int(keypoint_id), 1] + 0.5
                visibility = 1
            base = to_coco_map[position_id] * 3
            kpts[base + 0] = cx
            kpts[base + 1] = cy
            kpts[base + 2] = visibility
        coco_keypoints.append(kpts)
        scores.append(person_score * max(0, (entry[-1] - 1)))  # -1 for 'neck'
    return coco_keypoints, scores
def recalc_pose(pred,
                label):
    """
    Post-process raw network output into COCO-format keypoints for one batch.
    Parameters:
    ----------
    pred : numpy.ndarray
        Network output of shape (B, 57, h, w): 19 heatmap channels followed by 38 PAF channels.
    label : numpy.ndarray
        Per-sample metadata produced by the dataset:
        [image_id, scale, pad_top, pad_left, pad_bottom, pad_right, height, width].
    Returns:
    -------
    numpy.ndarray
        Detected keypoints with shape (num_persons, 17, 3).
    numpy.ndarray
        Per-person scores (first batch item).
    numpy.ndarray
        Image ID repeated per detected person (first batch item).
    """
    label_img_id = label[:, 0].astype(np.int32)
    # label_score = label[:, 1]
    pads = label[:, 2:6].astype(np.int32)
    heights = label[:, 6].astype(np.int32)
    widths = label[:, 7].astype(np.int32)
    keypoints = 19
    stride = 8
    heatmap2ds = pred[:, :keypoints]
    paf2ds = pred[:, keypoints:(3 * keypoints)]
    pred_pts_score = []
    pred_person_score = []
    label_img_id_ = []
    batch = pred.shape[0]
    for batch_i in range(batch):
        label_img_id_i = label_img_id[batch_i]
        pad = list(pads[batch_i])
        height = int(heights[batch_i])
        width = int(widths[batch_i])
        heatmap2d = heatmap2ds[batch_i]
        paf2d = paf2ds[batch_i]
        # Undo preprocessing: upsample by the network stride, crop padding, resize to the source image size.
        heatmaps = np.transpose(heatmap2d, (1, 2, 0))
        heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :]
        heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
        pafs = np.transpose(paf2d, (1, 2, 0))
        pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
        pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
        # Peak detection per joint type, then PAF-based grouping into persons.
        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(18):  # 19th for bg
            total_keypoints_num += extract_keypoints(
                heatmaps[:, :, kpt_idx],
                all_keypoints_by_type,
                total_keypoints_num)
        pose_entries, all_keypoints = group_keypoints(
            all_keypoints_by_type,
            pafs)
        coco_keypoints, scores = convert_to_coco_format(
            pose_entries,
            all_keypoints)
        pred_pts_score.append(coco_keypoints)
        pred_person_score.append(scores)
        label_img_id_.append([label_img_id_i] * len(scores))
    # NOTE(review): only the first batch item's scores/IDs are returned, and the reshape assumes the
    # per-image keypoint lists stack into a rectangular array — looks batch=1 only; confirm against callers.
    return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0])
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2MetaInfo(DatasetMetaInfo):
    """
    Descriptor of the COCO keypoint detection dataset metainfo (Lightweight OpenPose evaluation pipeline).
    """
    def __init__(self):
        super(CocoHpe2MetaInfo, self).__init__()
        self.label = "COCO"
        self.short_label = "coco"
        self.root_dir_name = "coco"
        self.dataset_class = CocoHpe2Dataset
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = 17
        self.input_image_size = (368, 368)
        self.train_metric_capts = None
        self.train_metric_names = None
        self.train_metric_extra_kwargs = None
        self.val_metric_capts = None
        self.val_metric_names = None
        self.test_metric_capts = ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        # The annotation file path is filled in later by `update_from_dataset`.
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.val_transform = CocoHpe2ValTransform
        self.test_transform = CocoHpe2ValTransform
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        self.load_ignore_extra = False

    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for COCO keypoint dataset metainfo).
        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--load-ignore-extra",
            action="store_true",
            help="ignore extra layers in the source PyTorch model")

    def update(self,
               args):
        """
        Update COCO keypoint dataset metainfo after user customizing.
        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe2MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.load_ignore_extra = args.load_ignore_extra

    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.
        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
| 20,988 | 39.597679 | 119 | py |
imgclsmob | imgclsmob-master/chainer_/datasets/coco_hpe3_dataset.py | """
COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose).
"""
import os
import math
import cv2
import numpy as np
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe3Dataset(GetterDataset):
    """
    COCO keypoint detection (2D multiple human pose estimation) dataset.
    Parameters:
    ----------
    root : string
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode="train",
                 transform=None):
        super(CocoHpe3Dataset, self).__init__()
        self._root = os.path.expanduser(root)
        self.mode = mode
        self.transform = transform
        # Any mode other than "train" reads the validation split.
        mode_name = "train" if mode == "train" else "val"
        annotations_dir_path = os.path.join(root, "annotations")
        annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
        # with open(annotations_file_path, "r") as f:
        #     self.file_names = json.load(f)["images"]
        self.image_dir_path = os.path.join(root, mode_name + "2017")
        self.annotations_file_path = annotations_file_path
        from pycocotools.coco import COCO
        self.coco_gt = COCO(self.annotations_file_path)
        self.validation_ids = self.coco_gt.getImgIds()[:]
    def __str__(self):
        return self.__class__.__name__ + "(" + self._root + ")"
    def __len__(self):
        return len(self.validation_ids)
    def __getitem__(self, idx):
        """
        Load and preprocess one sample.
        Returns a CHW float32 image scaled to height 512, padded to a multiple of 64, with pixels in [0, 1],
        and a float32 label vector [image_id, scale, pad_up, pad_left, pad_down, pad_right, src_h, src_w].
        """
        # file_name = self.file_names[idx]["file_name"]
        image_id = self.validation_ids[idx]
        file_name = self.coco_gt.imgs[image_id]["file_name"]
        image_file_path = os.path.join(self.image_dir_path, file_name)
        image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
        # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
        image_src_shape = image.shape[:2]
        # Preprocessing constants: target height, network downsample factor, constant border value.
        boxsize = 512
        max_downsample = 64
        pad_value = 128
        scale = boxsize / image.shape[0]
        # Cap the result size for extreme aspect ratios.
        # NOTE(review): `scale * image.shape[0]` always equals `boxsize`, so only the width term of this
        # condition can trigger — looks like a copy-paste slip; confirm against the reference implementation.
        if scale * image.shape[0] > 2600 or scale * image.shape[1] > 3800:
            scale = min(2600 / image.shape[0], 3800 / image.shape[1])
        image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        image, pad = self.pad_right_down_corner(image, max_downsample, pad_value)
        image = np.float32(image / 255)
        # HWC -> CHW for the network.
        image = image.transpose((2, 0, 1))
        # image_id = int(os.path.splitext(os.path.basename(file_name))[0])
        label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32)
        return image, label
    def _get_image(self, idx):
        image, label = self[idx]
        return image
    def _get_label(self, idx):
        image, label = self[idx]
        return label
    @staticmethod
    def pad_right_down_corner(img,
                              stride,
                              pad_value):
        """
        Pad `img` on the bottom and right with `pad_value` so both dimensions become multiples of `stride`.
        Returns the padded image and the applied [up, left, down, right] padding (up/left are always 0).
        """
        h = img.shape[0]
        w = img.shape[1]
        pad = 4 * [None]
        pad[0] = 0  # up
        pad[1] = 0  # left
        pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
        pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right
        img_padded = img
        # Each border strip replicates the edge row/column shape but is filled with the constant pad value.
        pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))
        img_padded = np.concatenate((pad_up, img_padded), axis=0)
        pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))
        img_padded = np.concatenate((pad_left, img_padded), axis=1)
        pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))
        img_padded = np.concatenate((img_padded, pad_down), axis=0)
        pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))
        img_padded = np.concatenate((img_padded, pad_right), axis=1)
        return img_padded, pad
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
    """
    No-op validation transform: returns the (image, label) pair untouched.
    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo (kept only for interface compatibility).
    """
    def __init__(self, ds_metainfo):
        self.ds_metainfo = ds_metainfo

    def __call__(self, src, label):
        return src, label
def recalc_pose(pred,
                label):
    """
    Convert raw network output (PAFs + heatmaps) into COCO-format keypoints.

    Parameters:
    ----------
    pred : np.ndarray
        Batch of network outputs, (batch, 50, H, W): 30 PAF channels followed
        by 20 heatmap channels (only the first 18 keypoint channels are used).
    label : np.ndarray
        Per-sample metadata rows built by the dataset:
        [image_id, score, pad_up, pad_left, pad_down, pad_right, src_h, src_w].

    Returns:
    -------
    tuple
        (keypoints of shape (N, 17, 3), person scores of shape (N,),
        image ids of shape (N,)) with one entry per detected person.
    """
    # Mapping from the detector's 18-part order to COCO's 17-keypoint order
    # ("neck" has no COCO counterpart, hence the None).
    dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15,
                     14: 2, 15: 1, 16: 4, 17: 3}
    parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne",
             "Lank", "Reye", "Leye", "Rear", "Lear"]
    num_parts = len(parts)
    parts_dict = dict(zip(parts, range(num_parts)))
    # 30 limbs (matching the 30 PAF channels), given by endpoint part names.
    limb_from = ['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck',
                 'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho',
                 'Lhip', 'Rear', 'Lear', 'Rhip']
    limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho',
               'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip',
               'Rkne', 'Rsho', 'Lsho', 'Lhip']
    limb_from = [parts_dict[n] for n in limb_from]
    limb_to = [parts_dict[n] for n in limb_to]
    # Sanity checks that the name-based tables resolve to the expected indices.
    assert limb_from == [x for x in [
        1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]]
    assert limb_to == [x for x in [
        0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]]
    limbs_conn = list(zip(limb_from, limb_to))
    limb_seq = limbs_conn
    paf_layers = 30  # first 30 output channels are PAFs
    num_layers = 50  # channels [30:50) are keypoint heatmaps
    stride = 4  # network output stride relative to the padded input
    label_img_id = label[:, 0].astype(np.int32)
    # label_score = label[:, 1]
    pads = label[:, 2:6].astype(np.int32)
    image_src_shapes = label[:, 6:8].astype(np.int32)
    pred_pts_score = []
    pred_person_score = []
    label_img_id_ = []
    batch = pred.shape[0]
    for batch_i in range(batch):
        label_img_id_i = label_img_id[batch_i]
        pad = list(pads[batch_i])
        image_src_shape = list(image_src_shapes[batch_i])
        output_blob = pred[batch_i].transpose((1, 2, 0))
        output_paf = output_blob[:, :, :paf_layers]
        output_heatmap = output_blob[:, :, paf_layers:num_layers]
        # Upsample to input resolution, undo the stride padding, then resize
        # back to the source image size.
        heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[
            pad[0]:(output_blob.shape[0] * stride - pad[2]),
            pad[1]:(output_blob.shape[1] * stride - pad[3]),
            :]
        heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
        paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        paf = paf[
            pad[0]:(output_blob.shape[0] * stride - pad[2]),
            pad[1]:(output_blob.shape[1] * stride - pad[3]),
            :]
        paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
        # Peaks -> pairwise limb connections -> grouped people.
        all_peaks = find_peaks(heatmap)
        connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq)
        subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq)
        for s in subset[..., 0]:
            keypoint_indexes = s[:18]
            person_keypoint_coordinates = []
            for index in keypoint_indexes:
                if index == -1:
                    # Undetected keypoint: zero coordinates, zero confidence.
                    X, Y, C = 0, 0, 0
                else:
                    X, Y, C = list(candidate[index.astype(int)][:2]) + [1]
                person_keypoint_coordinates.append([X, Y, C])
            person_keypoint_coordinates_coco = [None] * 17
            for dt_index, gt_index in dt_gt_mapping.items():
                if gt_index is None:
                    continue
                person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index]
            pred_pts_score.append(person_keypoint_coordinates_coco)
            # s[18] is the person's accumulated score slot (subset[..., -2, 0]).
            pred_person_score.append(1 - 1.0 / s[18])
            label_img_id_.append(label_img_id_i)
    return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_)
def find_peaks(heatmap_avg):
    """
    Detect per-part keypoint peaks on the averaged heatmaps.

    Parameters:
    ----------
    heatmap_avg : np.ndarray
        Heatmaps of shape (H, W, C) with the 18 keypoint channels first.

    Returns:
    -------
    list of list of tuple
        For each of the 18 parts, a list of peaks (x, y, score, global_id),
        with coordinates refined to sub-pixel precision and ids unique
        across all parts.
    """
    import torch
    thre1 = 0.1  # minimum peak score kept by the NMS
    offset_radius = 2  # neighbourhood radius for sub-pixel refinement
    all_peaks = []
    peak_counter = 0
    heatmap_avg = heatmap_avg.astype(np.float32)
    filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...]
    filter_map = torch.from_numpy(filter_map)
    # Run the NMS on GPU only when one is present; the original code called
    # .cuda() unconditionally, which crashes on CPU-only machines.
    if torch.cuda.is_available():
        filter_map = filter_map.cuda()
    filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1)
    filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0))
    for part in range(18):
        map_ori = heatmap_avg[:, :, part]
        peaks_binary = filter_map[:, :, part]
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
        refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks]
        id = range(peak_counter, peak_counter + len(refined_peaks_with_score))
        peaks_with_score_and_id = [refined_peaks_with_score[i] + (id[i],) for i in range(len(id))]
        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
    return all_peaks
def keypoint_heatmap_nms(heat, kernel=3, thre=0.1):
    """
    Suppress non-maximum responses on a keypoint heatmap (score map).

    A pixel survives iff it equals the maximum of its `kernel` x `kernel`
    (reflect-padded) neighbourhood and its score is at least `thre`;
    every other pixel is zeroed.
    """
    from torch.nn import functional as F
    half = (kernel - 1) // 2
    neighbourhood_max = F.max_pool2d(
        F.pad(heat, 4 * (half,), mode="reflect"),
        (kernel, kernel), stride=1, padding=0)
    is_local_max = (neighbourhood_max == heat).float()
    above_threshold = (heat >= thre).float()
    return heat * (is_local_max * above_threshold)
def refine_centroid(scorefmp, anchor, radius):
    """
    Refine a peak coordinate by the score-weighted centroid of its neighbourhood.

    Parameters:
    ----------
    scorefmp : np.ndarray
        2-D regressed score map, indexed as [y, x].
    anchor : tuple
        (x, y) integer coordinates of the detected peak.
    radius : int
        Radius (in pixels) of the window used for the weighted average.

    Returns:
    -------
    tuple
        (x_refined, y_refined, score). When the window would cross the map
        border, the anchor is returned unrefined with the score at the anchor.
    """
    x_c, y_c = anchor
    x_min = x_c - radius
    x_max = x_c + radius + 1
    y_min = y_c - radius
    y_max = y_c + radius + 1
    if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0:
        return anchor + (scorefmp[y_c, x_c], )
    score_box = scorefmp[y_min:y_max, x_min:x_max]
    # score_box is indexed [y, x] and np.mgrid's first output varies along the
    # first axis, so the first grid is the y-offset grid. The original code
    # unpacked them as `x_grid, y_grid`, swapping the sub-pixel offsets.
    y_grid, x_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1]
    offset_x = (score_box * x_grid).sum() / score_box.sum()
    offset_y = (score_box * y_grid).sum() / score_box.sum()
    x_refine = x_c + offset_x
    y_refine = y_c + offset_y
    refined_anchor = (x_refine, y_refine)
    return refined_anchor + (score_box.mean(),)
def find_connections(all_peaks, paf_avg, image_width, limb_seq):
    """
    Match keypoint peaks into limb connections using Part Affinity Fields.

    Parameters:
    ----------
    all_peaks : list of list of tuple
        Per-part peak lists; each peak is (x, y, score, global_id).
    paf_avg : np.ndarray
        PAF score maps of shape (H, W, num_limbs), one channel per limb.
    image_width : int
        Reference image extent used for the distance prior (the caller in
        this file passes the source image height).
    limb_seq : list of tuple of 2 int
        (from_part, to_part) part-index pairs defining each limb.

    Returns:
    -------
    connection_all : list
        Per-limb arrays with rows [idA, idB, score, i, j, limb_length],
        or an empty list for limbs that had no candidates.
    special_k : list of int
        Limb indices for which at least one endpoint had no peaks.
    """
    mid_num_ = 20  # maximum number of PAF samples along a candidate limb
    thre2 = 0.1  # per-sample PAF response threshold
    connect_ration = 0.8  # fraction of samples that must exceed thre2
    connection_all = []
    special_k = []
    for k in range(len(limb_seq)):
        score_mid = paf_avg[:, :, k]
        candA = all_peaks[limb_seq[k][0]]
        candB = all_peaks[limb_seq[k][1]]
        nA = len(candA)
        nB = len(candB)
        if nA != 0 and nB != 0:
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # Roughly one sample per pixel of limb length, capped.
                    mid_num = min(int(round(norm + 1)), mid_num_)
                    if norm == 0:
                        # Coincident peaks: no direction to integrate along.
                        continue
                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                        np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                    limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for
                                              I in range(len(startend))])
                    score_midpts = limb_response
                    # Mean PAF response with a penalty for limbs longer than
                    # half of `image_width`.
                    score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0)
                    criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i,
                            j,
                            score_with_dist_prior,
                            norm,
                            0.5 * score_with_dist_prior + 0.25 * candA[i][2] + 0.25 * candB[j][2]])
            # Greedy assignment: best combined score first, each peak used once.
            connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True)
            connection = np.zeros((0, 6))
            for c in range(len(connection_candidate)):
                i, j, s, limb_len = connection_candidate[c][0:4]
                if i not in connection[:, 3] and j not in connection[:, 4]:
                    connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]])
                    if len(connection) >= min(nA, nB):
                        break
            connection_all.append(connection)
        else:
            # An endpoint has no detected peaks: mark this limb as special.
            special_k.append(k)
            connection_all.append([])
    return connection_all, special_k
def find_people(connection_all, special_k, all_peaks, limb_seq):
    """
    Group limb connections into per-person keypoint sets.

    Each person is a row of `subset` with 20 slots: 18 part slots, then
    slot [-2] = [accumulated score, unused] and slot [-1] = [part count,
    longest limb length]. Each part slot stores [candidate_id, connection
    score], with -1 meaning unset.

    Parameters:
    ----------
    connection_all : list
        Per-limb connection arrays from `find_connections`.
    special_k : list of int
        Limb indices that had no candidates (skipped here).
    all_peaks : list of list of tuple
        Per-part peak lists; flattened into the `candidate` array.
    limb_seq : list of tuple of 2 int
        (from_part, to_part) part-index pairs per limb.

    Returns:
    -------
    subset : np.ndarray
        Array of shape (num_people, 20, 2) with the assembled people.
    candidate : np.ndarray
        Flattened peak array with rows (x, y, score, id).
    """
    len_rate = 16.0  # a new limb may be at most 16x the person's longest limb
    connection_tole = 0.7  # score tolerance when merging two partial people
    remove_recon = 0  # 0 disables the reconnection-removal branch below
    subset = -1 * np.ones((0, 20, 2))
    candidate = np.array([item for sublist in all_peaks for item in sublist])
    for k in range(len(limb_seq)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limb_seq[k])
            for i in range(len(connection_all[k])):
                # Count how many existing people already contain either
                # endpoint of this connection (at most the first two found).
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):
                    if subset[j][indexA][0].astype(int) == (partAs[i]).astype(int) or subset[j][indexB][0].astype(
                            int) == partBs[i].astype(int):
                        if found >= 2:
                            continue
                        subset_idx[found] = j
                        found += 1
                if found == 1:
                    # Exactly one person matches: attach/replace endpoint B.
                    j = subset_idx[0]
                    if subset[j][indexB][0].astype(int) == -1 and\
                            len_rate * subset[j][-1][1] > connection_all[k][i][-1]:
                        # B slot empty and limb not absurdly long: fill it in.
                        subset[j][indexB][0] = partBs[i]
                        subset[j][indexB][1] = connection_all[k][i][2]
                        subset[j][-1][0] += 1
                        subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                        subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    elif subset[j][indexB][0].astype(int) != partBs[i].astype(int):
                        if subset[j][indexB][1] >= connection_all[k][i][2]:
                            # Existing B has a better connection score: keep it.
                            pass
                        else:
                            # Replace the weaker B endpoint with this one.
                            if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]:
                                continue
                            subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
                            subset[j][indexB][0] = partBs[i]
                            subset[j][indexB][1] = connection_all[k][i][2]
                            subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                            subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\
                            subset[j][indexB][1] <= connection_all[k][i][2]:
                        # Same B candidate via a better-scoring connection:
                        # refresh the stored score contributions.
                        subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
                        subset[j][indexB][0] = partBs[i]
                        subset[j][indexB][1] = connection_all[k][i][2]
                        subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                        subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
                    else:
                        pass
                elif found == 2:
                    # Two partial people share this connection's endpoints.
                    j1, j2 = subset_idx
                    membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2]
                    membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2]
                    membership = membership1 + membership2
                    if len(np.nonzero(membership == 2)[0]) == 0:
                        # Disjoint part sets: merge j2 into j1 (if the new
                        # connection is strong and not absurdly long).
                        min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1])
                        min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1])
                        min_tolerance = min(min_limb1, min_limb2)
                        if connection_all[k][i][2] < connection_tole * min_tolerance or\
                                len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]:
                            continue
                        subset[j1][:-2][...] += (subset[j2][:-2][...] + 1)
                        subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0]
                        subset[j1][-2][0] += connection_all[k][i][2]
                        subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1])
                        subset = np.delete(subset, j2, 0)
                    else:
                        # Overlapping part sets: the weaker of the two shared
                        # endpoints may be disconnected (disabled by default,
                        # see remove_recon above).
                        if connection_all[k][i][0] in subset[j1, :-2, 0]:
                            c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0])
                            c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1])
                        else:
                            c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1])
                            c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0])
                        c1 = int(c1[0])
                        c2 = int(c2[0])
                        assert c1 != c2, "an candidate keypoint is used twice, shared by two people"
                        if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]:
                            continue
                        small_j = j1
                        remove_c = c1
                        if subset[j1][c1][1] > subset[j2][c2][1]:
                            small_j = j2
                            remove_c = c2
                        if remove_recon > 0:
                            subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \
                                subset[small_j][remove_c][1]
                            subset[small_j][remove_c][0] = -1
                            subset[small_j][remove_c][1] = -1
                            subset[small_j][-1][0] -= 1
                elif not found and k < len(limb_seq):
                    # No existing person contains either endpoint: start a
                    # new person from this connection. (The `k < len(limb_seq)`
                    # test is always true here since k iterates that range.)
                    row = -1 * np.ones((20, 2))
                    row[indexA][0] = partAs[i]
                    row[indexA][1] = connection_all[k][i][2]
                    row[indexB][0] = partBs[i]
                    row[indexB][1] = connection_all[k][i][2]
                    row[-1][0] = 2
                    row[-1][1] = connection_all[k][i][-1]
                    row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                    row = row[np.newaxis, :, :]
                    subset = np.concatenate((subset, row), axis=0)
    # Drop implausible people: fewer than 2 parts, or low mean score.
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)
    return subset, candidate
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe3MetaInfo(DatasetMetaInfo):
    """
    Metainfo descriptor for the COCO keypoints (human pose estimation)
    dataset, wired to `CocoHpe3Dataset` and the `recalc_pose`
    postprocessing in this module.
    """
    def __init__(self):
        super(CocoHpe3MetaInfo, self).__init__()
        self.label = "COCO"
        self.short_label = "coco"
        self.root_dir_name = "coco"
        self.dataset_class = CocoHpe3Dataset
        self.num_training_samples = None
        self.in_channels = 3
        self.num_classes = 17  # number of COCO keypoints
        self.input_image_size = (256, 256)
        # No training/validation metrics are configured; only the OKS AP
        # test metric is evaluated.
        self.train_metric_capts = None
        self.train_metric_names = None
        self.train_metric_extra_kwargs = None
        self.val_metric_capts = None
        self.val_metric_names = None
        self.test_metric_capts = ["Val.CocoOksAp"]
        self.test_metric_names = ["CocoHpeOksApMetric"]
        self.test_metric_extra_kwargs = [
            {"name": "OksAp",
             "coco_annotations_file_path": None,
             "validation_ids": None,
             "use_file": False,
             "pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
        self.saver_acc_ind = 0
        self.do_transform = True
        self.val_transform = CocoHpe2ValTransform
        self.test_transform = CocoHpe2ValTransform
        self.ml_type = "hpe"
        self.net_extra_kwargs = {}
        self.mean_rgb = (0.485, 0.456, 0.406)
        self.std_rgb = (0.229, 0.224, 0.225)
        self.load_ignore_extra = False
    def add_dataset_parser_arguments(self,
                                     parser,
                                     work_dir_path):
        """
        Create python script parameters (for the COCO HPE dataset metainfo).

        Parameters:
        ----------
        parser : ArgumentParser
            ArgumentParser instance.
        work_dir_path : str
            Path to working directory.
        """
        super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
        parser.add_argument(
            "--input-size",
            type=int,
            nargs=2,
            default=self.input_image_size,
            help="size of the input for model")
        parser.add_argument(
            "--load-ignore-extra",
            action="store_true",
            help="ignore extra layers in the source PyTroch model")
    def update(self,
               args):
        """
        Update the COCO HPE dataset metainfo after user customizing.

        Parameters:
        ----------
        args : ArgumentParser
            Main script arguments.
        """
        super(CocoHpe3MetaInfo, self).update(args)
        self.input_image_size = args.input_size
        self.load_ignore_extra = args.load_ignore_extra
    def update_from_dataset(self,
                            dataset):
        """
        Update dataset metainfo after a dataset class instance creation.

        Parameters:
        ----------
        dataset : obj
            A dataset class instance.
        """
        self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
        # self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids
| 23,313 | 39.830123 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/airnext.py | """
AirNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten, is_channels_first
from .airnet import AirBlock, AirInitBlock
class AirNeXtBottleneck(nn.Layer):
    """
    AirNeXt bottleneck block for the residual path of an AirNeXt unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 data_format="channels_last",
                 **kwargs):
        super(AirNeXtBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        channels_per_group = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * channels_per_group
        # The attention (air) branch is only built for stride-1 blocks with a
        # sufficiently small bottleneck.
        self.use_air_block = (strides == 1 and mid_channels < 512)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            strides=strides,
            groups=cardinality,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
        if self.use_air_block:
            self.air = AirBlock(
                in_channels=in_channels,
                out_channels=group_width,
                groups=(cardinality // ratio),
                ratio=ratio,
                data_format=data_format,
                name="air")

    def call(self, x, training=None):
        # The attention map is computed from the block input and applied
        # after the grouped 3x3 convolution.
        att = self.air(x, training=training) if self.use_air_block else None
        y = self.conv1(x, training=training)
        y = self.conv2(y, training=training)
        if att is not None:
            y = y * att
        y = self.conv3(y, training=training)
        return y
class AirNeXtUnit(nn.Layer):
    """
    AirNeXt unit: bottleneck body plus a (possibly projected) residual
    connection, followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 data_format="channels_last",
                 **kwargs):
        super(AirNeXtUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever the shape of the identity
        # branch would not match the body output.
        self.resize_identity = (strides != 1) or (in_channels != out_channels)
        self.body = AirNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            ratio=ratio,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        return self.activ(self.body(x, training=training) + identity)
class AirNeXt(tf.keras.Model):
    """
    AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(AirNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Backbone: init block, then one stage per channel list, then pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(AirInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for stage_id, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(stage_id + 1))
            for unit_id, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_id == 0) and (stage_id != 0) else 1
                stage.add(AirNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=unit_strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    ratio=ratio,
                    name="unit{}".format(unit_id + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_airnext(blocks,
                cardinality,
                bottleneck_width,
                base_channels,
                ratio,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create AirNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    blocks_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Channels double at each stage; every unit of a stage shares one width.
    channels = [[base_channels * (2 ** i) * bottleneck_expansion] * count
                for i, count in enumerate(layers)]
    net = AirNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        ratio=ratio,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def airnext50_32x4d_r2(**kwargs):
    """
    AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_airnext(blocks=50, cardinality=32, bottleneck_width=4, base_channels=64, ratio=2,
                       model_name="airnext50_32x4d_r2", **kwargs)
def airnext101_32x4d_r2(**kwargs):
    """
    AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64, ratio=2,
                       model_name="airnext101_32x4d_r2", **kwargs)
def airnext101_32x4d_r16(**kwargs):
    """
    AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64, ratio=16,
                       model_name="airnext101_32x4d_r16", **kwargs)
def _test():
    """Smoke-test the AirNeXt constructors: shape and parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    expected_weight_counts = {
        airnext50_32x4d_r2: 27604296,
        airnext101_32x4d_r2: 54099272,
        airnext101_32x4d_r16: 45456456,
    }
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
| 12,866 | 31.087282 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/pspnet.py | """
PSPNet for image segmentation, implemented in TensorFlow.
Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
"""
__all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco',
'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k',
'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, Identity, is_channels_first, interpolate_im,\
get_im_size
from .resnetd import resnetd50b, resnetd101b
class PSPFinalBlock(nn.Layer):
    """
    PSPNet final block: 3x3 conv bottleneck, dropout, 1x1 classifier, and
    bilinear upsampling to the requested output size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(PSPFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        hidden_channels = in_channels // bottleneck_factor
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=hidden_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=hidden_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, out_size, training=None):
        y = self.conv1(x, training=training)
        y = self.dropout(y, training=training)
        y = self.conv2(y)
        # Upsample the class scores to the requested output resolution.
        return interpolate_im(y, out_size=out_size, data_format=self.data_format)
class PyramidPoolingBranch(nn.Layer):
    """
    Single Pyramid Pooling branch: average-pool to a fixed grid, 1x1 conv,
    then upsample back to the original (or configured) spatial size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_out_size : int
        Target output size of the image.
    upscale_out_size : tuple of 2 int or None
        Spatial size of output image for the bilinear upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_out_size,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        self.data_format = data_format
        self.pool = nn.AveragePooling2D(
            pool_size=pool_out_size,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        # Fall back to the runtime input size when no fixed size was given.
        if self.upscale_out_size is not None:
            target_size = self.upscale_out_size
        else:
            target_size = get_im_size(x, data_format=self.data_format)
        y = self.pool(x)
        y = self.conv(y, training=training)
        return interpolate_im(y, out_size=target_size, data_format=self.data_format)
class PyramidPooling(nn.Layer):
    """
    Pyramid Pooling module: the identity branch concatenated with four
    pooled branches (grids 1, 2, 3 and 6), doubling the channel count.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert (in_channels % 4 == 0)
        branch_channels = in_channels // 4
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(Identity(name="branch1"))
        for branch_id, pool_out_size in enumerate(pool_out_sizes, start=2):
            self.branches.add(PyramidPoolingBranch(
                in_channels=in_channels,
                out_channels=branch_channels,
                pool_out_size=pool_out_size,
                upscale_out_size=upscale_out_size,
                data_format=data_format,
                name="branch{}".format(branch_id)))

    def call(self, x, training=None):
        return self.branches(x, training=training)
class PSPNet(tf.keras.Model):
    """
    PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 data_format="channels_last",
                 **kwargs):
        super(PSPNet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # The backbone operates at 1/8 resolution, so the input must be
        # divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format
        self.backbone = backbone
        pyramid_in_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
        self.pool = PyramidPooling(
            in_channels=backbone_out_channels,
            upscale_out_size=pyramid_in_size,
            data_format=data_format,
            name="pool")
        # The pyramid module concatenates its input with four quarter-width
        # branches, doubling the channel count.
        pool_out_channels = 2 * backbone_out_channels
        self.final_block = PSPFinalBlock(
            in_channels=pool_out_channels,
            out_channels=classes,
            bottleneck_factor=8,
            data_format=data_format,
            name="final_block")
        if self.aux:
            aux_out_channels = backbone_out_channels // 2
            self.aux_block = PSPFinalBlock(
                in_channels=aux_out_channels,
                out_channels=classes,
                bottleneck_factor=4,
                data_format=data_format,
                name="aux_block")

    def call(self, x, training=None):
        out_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        # The backbone yields the main feature map and an auxiliary bend.
        x, y = self.backbone(x, training=training)
        x = self.pool(x, training=training)
        x = self.final_block(x, out_size, training=training)
        if not self.aux:
            return x
        y = self.aux_block(y, out_size, training=training)
        return x, y
def get_pspnet(backbone,
               classes,
               aux=False,
               model_name=None,
               data_format="channels_last",
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create PSPNet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = PSPNet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        data_format=data_format,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        # by_name/skip_mismatch lets checkpoints without the aux head load.
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)
    return net
def pspnet_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                              data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_voc",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                               data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_voc",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                              data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_coco",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                               data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_coco",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                              data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_ade20k",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                               data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_ade20k",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last",
                                 **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                              data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_cityscapes",
        data_format=data_format,
        **kwargs)
def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last",
                                  **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the backbone (feature extractor).
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                               data_format=data_format)
    backbone = backbone_net.features
    # Drop the final pooling layer: PSPNet consumes spatial feature maps.
    del backbone.children[-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_cityscapes",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test every PSPNet variant: shape checks plus trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (480, 480)
    aux = False
    pretrained = False

    model_list = [
        (pspnet_resnetd50b_voc, 21),
        (pspnet_resnetd101b_voc, 21),
        (pspnet_resnetd50b_coco, 21),
        (pspnet_resnetd101b_coco, 21),
        (pspnet_resnetd50b_ade20k, 150),
        (pspnet_resnetd101b_ade20k, 150),
        (pspnet_resnetd50b_cityscapes, 19),
        (pspnet_resnetd101b_cityscapes, 19),
    ]
    # Expected trainable-weight counts, keyed by constructor.
    expected_aux = {
        pspnet_resnetd50b_voc: 49081578,
        pspnet_resnetd101b_voc: 68073706,
        pspnet_resnetd50b_coco: 49081578,
        pspnet_resnetd101b_coco: 68073706,
        pspnet_resnetd50b_ade20k: 49180908,
        pspnet_resnetd101b_ade20k: 68173036,
        pspnet_resnetd50b_cityscapes: 49080038,
        pspnet_resnetd101b_cityscapes: 68072166,
    }
    expected_plain = {
        pspnet_resnetd50b_voc: 46716373,
        pspnet_resnetd101b_voc: 65708501,
        pspnet_resnetd50b_coco: 46716373,
        pspnet_resnetd101b_coco: 65708501,
        pspnet_resnetd50b_ade20k: 46782550,
        pspnet_resnetd101b_ade20k: 65774678,
        pspnet_resnetd50b_cityscapes: 46715347,
        pspnet_resnetd101b_cityscapes: 65707475,
    }
    expected = expected_aux if aux else expected_plain

    for model, classes in model_list:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format)
        batch = 14
        input_shape = ((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format)
                       else (batch, in_size[0], in_size[1], 3))
        x = tf.random.normal(input_shape)
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        # Output must keep the input's spatial size and have `classes` channels.
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected[model])
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
| 22,270 | 38.487589 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/dla.py | """
DLA for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
"""
__all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block, SimpleSequential, flatten, is_channels_first,\
get_channel_axis
from .resnet import ResBlock, ResBottleneck
from .resnext import ResNeXtBottleneck
class DLABottleneck(ResBottleneck):
    """
    DLA bottleneck block for residual path in residual block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor=2,
                 data_format="channels_last",
                 **kwargs):
        # Pure delegation to ResBottleneck; DLA only pins its default bottleneck factor to 2.
        super(DLABottleneck, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            bottleneck_factor=bottleneck_factor,
            data_format=data_format,
            **kwargs)
class DLABottleneckX(ResNeXtBottleneck):
    """
    DLA ResNeXt-like bottleneck block for residual path in residual block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int, default 32
        Number of groups.
    bottleneck_width: int, default 8
        Width of bottleneck block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality=32,
                 bottleneck_width=8,
                 data_format="channels_last",
                 **kwargs):
        # Pure delegation to ResNeXtBottleneck with DLA's defaults (32 groups, width 8).
        super(DLABottleneckX, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            data_format=data_format,
            **kwargs)
class DLAResBlock(nn.Layer):
    """
    DLA residual block with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    body_class : nn.Module, default ResBlock
        Residual block body class.
    return_down : bool, default False
        Whether return downsample result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 body_class=ResBlock,
                 return_down=False,
                 data_format="channels_last",
                 **kwargs):
        super(DLAResBlock, self).__init__(**kwargs)
        self.return_down = return_down
        # Downsample the identity branch via max-pooling when the block strides.
        self.downsample = (strides > 1)
        # Project the identity branch when channel counts change.
        self.project = (in_channels != out_channels)
        self.body = body_class(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name="body")
        self.activ = nn.ReLU()
        if self.downsample:
            self.downsample_pool = nn.MaxPool2D(
                pool_size=strides,
                strides=strides,
                data_format=data_format,
                name="downsample_pool")
        if self.project:
            self.project_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                activation=None,
                data_format=data_format,
                name="project_conv")

    def call(self, x, training=None):
        down = self.downsample_pool(x) if self.downsample else x
        # `down` is always a tensor (pooled input or the input itself), so the
        # former `if identity is None` guard was unreachable and has been removed.
        identity = self.project_conv(down, training=training) if self.project else down
        x = self.body(x, training=training)
        x = x + identity
        x = self.activ(x)
        if self.return_down:
            return x, down
        else:
            return x
class DLARoot(nn.Layer):
    """
    DLA root block: fuses the outputs of the two tree branches (plus any extra
    skip tensors) with a 1x1 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    residual : bool
        Whether use residual connection.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 residual,
                 data_format="channels_last",
                 **kwargs):
        super(DLARoot, self).__init__(**kwargs)
        self.residual = residual
        self.data_format = data_format
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv")
        self.activ = nn.ReLU()

    def call(self, x2, x1, extra, training=None):
        # Fuse both branch outputs plus any extra skip tensors along the channel axis.
        branches = [x2, x1] + list(extra)
        out = tf.concat(branches, axis=get_channel_axis(self.data_format))
        out = self.conv(out, training=training)
        if self.residual:
            # Residual connection from the last branch (x2).
            out += x2
        return self.activ(out)
class DLATree(nn.Layer):
    """
    DLA tree unit. It's like iterative stage.

    Parameters:
    ----------
    levels : int
        Number of levels in the stage.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    res_body_class : nn.Module
        Residual block body class.
    strides : int or tuple/list of 2 int
        Strides of the convolution in a residual block.
    root_residual : bool
        Whether use residual connection in the root.
    root_dim : int
        Number of input channels in the root block.
    first_tree : bool, default False
        Is this tree stage the first stage in the net.
    input_level : bool, default True
        Is this tree unit the first unit in the stage.
    return_down : bool, default False
        Whether return downsample result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 levels,
                 in_channels,
                 out_channels,
                 res_body_class,
                 strides,
                 root_residual,
                 root_dim=0,
                 first_tree=False,
                 input_level=True,
                 return_down=False,
                 data_format="channels_last",
                 **kwargs):
        super(DLATree, self).__init__(**kwargs)
        self.return_down = return_down
        # Only the first unit of a non-first stage feeds its downsampled input to the root.
        self.add_down = (input_level and not first_tree)
        # At the deepest recursion level the subtrees are plain residual blocks joined by a root.
        self.root_level = (levels == 1)
        # Default root width: both subtree outputs concatenated.
        if root_dim == 0:
            root_dim = 2 * out_channels
        if self.add_down:
            # The root also receives the downsampled input tensor.
            root_dim += in_channels
        if self.root_level:
            # Leaf level: two residual blocks; tree1 strides and also returns its downsample.
            self.tree1 = DLAResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                body_class=res_body_class,
                return_down=True,
                data_format=data_format,
                name="tree1")
            self.tree2 = DLAResBlock(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                body_class=res_body_class,
                return_down=False,
                data_format=data_format,
                name="tree2")
        else:
            # Inner level: recurse; tree2's root absorbs tree1's output (root_dim + out_channels).
            self.tree1 = DLATree(
                levels=levels - 1,
                in_channels=in_channels,
                out_channels=out_channels,
                res_body_class=res_body_class,
                strides=strides,
                root_residual=root_residual,
                root_dim=0,
                input_level=False,
                return_down=True,
                data_format=data_format,
                name="tree1")
            self.tree2 = DLATree(
                levels=levels - 1,
                in_channels=out_channels,
                out_channels=out_channels,
                res_body_class=res_body_class,
                strides=1,
                root_residual=root_residual,
                root_dim=root_dim + out_channels,
                input_level=False,
                return_down=False,
                data_format=data_format,
                name="tree2")
        if self.root_level:
            # The fusion root only exists at the leaf level; inner levels delegate to tree2's root.
            self.root = DLARoot(
                in_channels=root_dim,
                out_channels=out_channels,
                residual=root_residual,
                data_format=data_format,
                name="root")

    def call(self, x, extra=None, training=None):
        """Forward pass; `extra` accumulates skip tensors handed down to the root block."""
        extra = [] if extra is None else extra
        x1, down = self.tree1(x, training=training)
        if self.add_down:
            extra.append(down)
        if self.root_level:
            # Leaf: fuse both branch outputs (and extras) in the root.
            x2 = self.tree2(x1, training=training)
            x = self.root(x2, x1, extra, training=training)
        else:
            # Inner: pass x1 along as an extra skip for the deeper root.
            extra.append(x1)
            x = self.tree2(x1, extra, training=training)
        if self.return_down:
            return x, down
        else:
            return x
class DLAInitBlock(nn.Layer):
    """
    DLA specific initial block: a 7x7 stem convolution followed by two 3x3
    convolutions, the last of which strides by 2.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(DLAInitBlock, self).__init__(**kwargs)
        # Intermediate width is half of the output width.
        hidden_channels = out_channels // 2
        self.conv1 = conv7x7_block(
            in_channels=in_channels,
            out_channels=hidden_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=hidden_channels,
            out_channels=hidden_channels,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=hidden_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        # Apply the three stem convolutions in sequence.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class DLA(tf.keras.Model):
    """
    DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    levels : int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool
        Whether use residual connection in the root blocks.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 levels,
                 channels,
                 init_block_channels,
                 res_body_class,
                 residual_root,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DLA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Backbone: stem block, one DLATree per stage, then average pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(DLAInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i in range(len(levels)):
            levels_i = levels[i]
            out_channels = channels[i]
            # Only the first stage is marked as the first tree (no down-skip into the root).
            first_tree = (i == 0)
            self.features.add(DLATree(
                levels=levels_i,
                in_channels=in_channels,
                out_channels=out_channels,
                res_body_class=res_body_class,
                strides=2,
                root_residual=residual_root,
                first_tree=first_tree,
                data_format=data_format,
                name="stage{}".format(i + 1)))
            in_channels = out_channels
        # 7x7 average pooling; presumably reduces a 224x224 input's final map to 1x1 — confirm for other sizes.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classifier head: 1x1 convolution; output is flattened in call().
        self.output1 = conv1x1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="output1")

    def call(self, x, training=None):
        """Forward pass; returns the flattened class logits."""
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_dla(levels,
            channels,
            res_body_class,
            residual_root=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create DLA model with specific parameters.

    Parameters:
    ----------
    levels : int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool, default False
        Whether use residual connection in the root blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DLA
        The requested model, optionally initialized with pretrained weights.
    """
    init_block_channels = 32
    net = DLA(
        levels=levels,
        channels=channels,
        init_block_channels=init_block_channels,
        res_body_class=res_body_class,
        residual_root=residual_root,
        **kwargs)
    if pretrained:
        # `not model_name` covers both None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # The model must be built (variables created) before weights can be loaded.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def dla34(**kwargs):
    """
    DLA-34 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 128, 256, 512],
        res_body_class=ResBlock,
        model_name="dla34",
        **kwargs)
def dla46c(**kwargs):
    """
    DLA-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneck,
        model_name="dla46c",
        **kwargs)
def dla46xc(**kwargs):
    """
    DLA-X-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla46xc",
        **kwargs)
def dla60(**kwargs):
    """
    DLA-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        model_name="dla60",
        **kwargs)
def dla60x(**kwargs):
    """
    DLA-X-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        model_name="dla60x",
        **kwargs)
def dla60xc(**kwargs):
    """
    DLA-X-60-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla60xc",
        **kwargs)
def dla102(**kwargs):
    """
    DLA-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla102",
        **kwargs)
def dla102x(**kwargs):
    """
    DLA-X-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        residual_root=True,
        model_name="dla102x",
        **kwargs)
def dla102x2(**kwargs):
    """
    DLA-X2-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    class DLABottleneckX64(DLABottleneckX):
        # ResNeXt-style bottleneck with cardinality fixed at 64 (vs. the default 32).
        def __init__(self, in_channels, out_channels, strides, **kwargs):
            super(DLABottleneckX64, self).__init__(in_channels, out_channels, strides, cardinality=64, **kwargs)

    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX64,
        residual_root=True,
        model_name="dla102x2",
        **kwargs)
def dla169(**kwargs):
    """
    DLA-169 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[2, 3, 5, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla169",
        **kwargs)
def _test():
    """Smoke-test every DLA variant: output-shape checks plus trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-weight counts, keyed by constructor; dict preserves insertion order.
    expected_weights = {
        dla34: 15742104,
        dla46c: 1301400,
        dla46xc: 1068440,
        dla60: 22036632,
        dla60x: 17352344,
        dla60xc: 1319832,
        dla102: 33268888,
        dla102x: 26309272,
        dla102x2: 41282200,
        dla169: 53389720,
    }

    for model in expected_weights:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weights[model])
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
| 22,786 | 31.599428 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/proxylessnas.py | """
ProxylessNAS for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14',
'ProxylessUnit', 'get_proxylessnas']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import ConvBlock, conv1x1_block, conv3x3_block, SimpleSequential, flatten, is_channels_first
class ProxylessBlock(nn.Layer):
    """
    ProxylessNAS block for residual path in ProxylessNAS unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    bn_eps : float
        Small float added to variance in Batch norm.
    expansion : int
        Expansion ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 bn_eps,
                 expansion,
                 data_format="channels_last",
                 **kwargs):
        super(ProxylessBlock, self).__init__(**kwargs)
        # The expansion ("bottleneck") 1x1 conv is only needed when the block widens channels.
        self.use_bc = (expansion > 1)
        mid_channels = in_channels * expansion
        if self.use_bc:
            self.bc_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_eps=bn_eps,
                activation="relu6",
                data_format=data_format,
                name="bc_conv")
        # Symmetric padding derived from the kernel size.
        padding = (kernel_size - 1) // 2
        # Depthwise conv: groups equals the channel count.
        self.dw_conv = ConvBlock(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=mid_channels,
            bn_eps=bn_eps,
            activation="relu6",
            data_format=data_format,
            name="dw_conv")
        # Pointwise projection without activation (linear bottleneck).
        self.pw_conv = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="pw_conv")

    def call(self, x, training=None):
        """Forward pass: optional expansion conv, depthwise conv, pointwise projection."""
        if self.use_bc:
            x = self.bc_conv(x, training=training)
        x = self.dw_conv(x, training=training)
        x = self.pw_conv(x, training=training)
        return x
class ProxylessUnit(nn.Layer):
    """
    ProxylessNAS unit: an optional residual (body) branch combined with an
    optional identity (shortcut) branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size for body block.
    strides : int
        Strides of the convolution.
    bn_eps : float
        Small float added to variance in Batch norm.
    expansion : int
        Expansion ratio for body block.
    residual : bool
        Whether to use residual branch.
    shortcut : bool
        Whether to use identity branch.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 bn_eps,
                 expansion,
                 residual,
                 shortcut,
                 data_format="channels_last",
                 **kwargs):
        super(ProxylessUnit, self).__init__(**kwargs)
        # A unit must carry at least one of the two branches.
        assert (residual or shortcut)
        self.residual = residual
        self.shortcut = shortcut
        if self.residual:
            self.body = ProxylessBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                bn_eps=bn_eps,
                expansion=expansion,
                data_format=data_format,
                name="body")

    def call(self, x, training=None):
        if not self.residual:
            # Identity-only unit.
            return x
        body_out = self.body(x, training=training)
        # Add the identity branch when both branches are active.
        return x + body_out if self.shortcut else body_out
class ProxylessNAS(tf.keras.Model):
    """
    ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    residuals : list of list of int
        Whether to use residual branch in units.
    shortcuts : list of list of int
        Whether to use identity branch in units.
    kernel_sizes : list of list of int
        Convolution window size for each units.
    expansions : list of list of int
        Expansion ratio for each units.
    bn_eps : float, default 1e-3
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 residuals,
                 shortcuts,
                 kernel_sizes,
                 expansions,
                 bn_eps=1e-3,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ProxylessNAS, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # Stem: strided 3x3 conv that halves the spatial resolution.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            bn_eps=bn_eps,
            activation="relu6",
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # Build one stage per entry of `channels`; the per-unit hyperparameters
        # come from the parallel lists residuals/shortcuts/kernel_sizes/expansions.
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            residuals_per_stage = residuals[i]
            shortcuts_per_stage = shortcuts[i]
            kernel_sizes_per_stage = kernel_sizes[i]
            expansions_per_stage = expansions[i]
            for j, out_channels in enumerate(channels_per_stage):
                residual = (residuals_per_stage[j] == 1)
                shortcut = (shortcuts_per_stage[j] == 1)
                kernel_size = kernel_sizes_per_stage[j]
                expansion = expansions_per_stage[j]
                # First unit of every stage except the first one downsamples.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ProxylessUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    bn_eps=bn_eps,
                    expansion=expansion,
                    residual=residual,
                    shortcut=shortcut,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Final 1x1 conv expanding to `final_block_channels`.
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            activation="relu6",
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # 7x7 average pooling assumes a 224x224 input (7 = 224 / 32).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classification head.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Compute class logits of shape (batch, classes) for input images."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_proxylessnas(version,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Create ProxylessNAS model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    net : ProxylessNAS
        The constructed network (with weights loaded when `pretrained`).
    """
    # Each version fixes the searched architecture: per-unit residual flags,
    # channel counts, kernel sizes and expansion ratios, plus stem/final widths.
    if version == "cpu":
        residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104],
                    [216, 216, 216, 216, 360]]
        kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]]
        expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]]
        init_block_channels = 40
        final_block_channels = 1432
    elif version == "gpu":
        residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128],
                    [256, 256, 256, 256, 432]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]]
        init_block_channels = 40
        final_block_channels = 1728
    elif version == "mobile":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96],
                    [192, 192, 192, 192, 320]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 32
        final_block_channels = 1280
    elif version == "mobile14":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136],
                    [256, 256, 256, 256, 448]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 48
        final_block_channels = 1792
    else:
        raise ValueError("Unsupported ProxylessNAS version: {}".format(version))
    # The identity-branch layout is shared by all versions.
    shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]]
    net = ProxylessNAS(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        residuals=residuals,
        shortcuts=shortcuts,
        kernel_sizes=kernel_sizes,
        expansions=expansions,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build the model with a concrete input shape before loading weights.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def proxylessnas_cpu(**kwargs):
    """
    Build the CPU-optimized ProxylessNAS variant from 'ProxylessNAS: Direct Neural Architecture Search on Target
    Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_proxylessnas(model_name="proxylessnas_cpu", version="cpu", **kwargs)
    return net
def proxylessnas_gpu(**kwargs):
    """
    Build the GPU-optimized ProxylessNAS variant from 'ProxylessNAS: Direct Neural Architecture Search on Target
    Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_proxylessnas(model_name="proxylessnas_gpu", version="gpu", **kwargs)
    return net
def proxylessnas_mobile(**kwargs):
    """
    Build the mobile-optimized ProxylessNAS variant from 'ProxylessNAS: Direct Neural Architecture Search on
    Target Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_proxylessnas(model_name="proxylessnas_mobile", version="mobile", **kwargs)
    return net
def proxylessnas_mobile14(**kwargs):
    """
    Build the Mobile-14 (1.4x width) ProxylessNAS variant from 'ProxylessNAS: Direct Neural Architecture Search
    on Target Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_proxylessnas(model_name="proxylessnas_mobile14", version="mobile14", **kwargs)
    return net
def _test():
    """Smoke-test all ProxylessNAS variants: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts per constructor.
    expected_counts = {
        proxylessnas_cpu: 4361648,
        proxylessnas_gpu: 7119848,
        proxylessnas_mobile: 4080512,
        proxylessnas_mobile14: 6857568,
    }
    for constructor in (proxylessnas_cpu, proxylessnas_gpu, proxylessnas_mobile, proxylessnas_mobile14):
        net = constructor(pretrained=pretrained, data_format=data_format)
        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(constructor.__name__, weight_count))
        assert (weight_count == expected_counts[constructor])
| 15,845 | 35.178082 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/shufflenetv2.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock,\
BatchNorm, MaxPool2d, SimpleSequential, get_channel_axis, flatten
class ShuffleUnit(nn.Layer):
    """
    ShuffleNetV2 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        # Each of the two branches carries half of the output channels.
        mid_channels = out_channels // 2
        # Main branch: 1x1 compress -> 3x3 depthwise -> 1x1 expand.
        # When downsampling, it consumes the full input; otherwise half the split.
        self.compress_conv1 = conv1x1(
            in_channels=(in_channels if self.downsample else mid_channels),
            out_channels=mid_channels,
            data_format=data_format,
            name="compress_conv1")
        self.compress_bn1 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="compress_bn1")
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            strides=(2 if self.downsample else 1),
            data_format=data_format,
            name="dw_conv2")
        self.dw_bn2 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="dw_bn2")
        self.expand_conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="expand_conv3")
        self.expand_bn3 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="expand_bn3")
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                data_format=data_format,
                name="se")
        if downsample:
            # Second branch (only when downsampling): strided depthwise + 1x1.
            self.dw_conv4 = depthwise_conv3x3(
                channels=in_channels,
                strides=2,
                data_format=data_format,
                name="dw_conv4")
            self.dw_bn4 = BatchNorm(
                # in_channels=in_channels,
                data_format=data_format,
                name="dw_bn4")
            self.expand_conv5 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                data_format=data_format,
                name="expand_conv5")
            self.expand_bn5 = BatchNorm(
                # in_channels=mid_channels,
                data_format=data_format,
                name="expand_bn5")
        self.activ = nn.ReLU()
        # Shuffle mixes the two concatenated halves across channels.
        self.c_shuffle = ChannelShuffle(
            channels=out_channels,
            groups=2,
            data_format=data_format,
            name="c_shuffle")

    def call(self, x, training=None):
        if self.downsample:
            # Downsample: both branches process the full input tensor.
            y1 = self.dw_conv4(x)
            y1 = self.dw_bn4(y1, training=training)
            y1 = self.expand_conv5(y1)
            y1 = self.expand_bn5(y1, training=training)
            y1 = self.activ(y1)
            x2 = x
        else:
            # Regular unit: split channels; y1 passes through untouched.
            y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(self.data_format))
        y2 = self.compress_conv1(x2)
        y2 = self.compress_bn1(y2, training=training)
        y2 = self.activ(y2)
        y2 = self.dw_conv2(y2)
        y2 = self.dw_bn2(y2, training=training)
        y2 = self.expand_conv3(y2)
        y2 = self.expand_bn3(y2, training=training)
        y2 = self.activ(y2)
        if self.use_se:
            y2 = self.se(y2)
        # Residual add only makes sense when shapes match (no downsample).
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        x = self.c_shuffle(x)
        return x
class ShuffleInitBlock(nn.Layer):
    """
    ShuffleNetV2 stem: a strided 3x3 conv block followed by ceil-mode 3x3
    max-pooling, together reducing spatial resolution by a factor of four.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        self.conv = conv3x3_block(
            strides=2,
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            strides=2,
            pool_size=3,
            padding=0,
            ceil_mode=True,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        out = self.conv(x, training=training)
        return self.pool(out)
class ShuffleNetV2(tf.keras.Model):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # Stem: conv + max-pool, 4x spatial reduction.
        self.features.add(ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage downsamples.
                downsample = (j == 0)
                stage.add(ShuffleUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=downsample,
                    use_se=use_se,
                    use_residual=use_residual,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Final 1x1 conv expanding the feature width.
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # 7x7 average pooling assumes a 224x224 input (7 = 224 / 32).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classification head.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Compute class logits of shape (batch, classes) for input images."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_shufflenetv2(width_scale,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Build a ShuffleNetV2 with the requested width multiplier.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Reference (1.0x) configuration.
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]

    # Apply the width multiplier; the final block only widens past 1.5x.
    if width_scale != 1.0:
        channels = [[int(width * width_scale) for width in stage] for stage in channels]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build with a concrete input shape before restoring weights.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def shufflenetv2_wd2(**kwargs):
    """
    Build ShuffleNetV2 0.5x from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenetv2(model_name="shufflenetv2_wd2", width_scale=(12.0 / 29.0), **kwargs)
    return net
def shufflenetv2_w1(**kwargs):
    """
    Build ShuffleNetV2 1x from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenetv2(model_name="shufflenetv2_w1", width_scale=1.0, **kwargs)
    return net
def shufflenetv2_w3d2(**kwargs):
    """
    Build ShuffleNetV2 1.5x from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenetv2(model_name="shufflenetv2_w3d2", width_scale=(44.0 / 29.0), **kwargs)
    return net
def shufflenetv2_w2(**kwargs):
    """
    Build ShuffleNetV2 2x from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenetv2(model_name="shufflenetv2_w2", width_scale=(61.0 / 29.0), **kwargs)
    return net
def _test():
    """Smoke-test all ShuffleNetV2 variants: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False
    # Expected trainable-parameter counts per constructor.
    expected_counts = {
        shufflenetv2_wd2: 1366792,
        shufflenetv2_w1: 2278604,
        shufflenetv2_w3d2: 4406098,
        shufflenetv2_w2: 7601686,
    }
    for constructor in (shufflenetv2_wd2, shufflenetv2_w1, shufflenetv2_w3d2, shufflenetv2_w2):
        net = constructor(pretrained=pretrained)
        batch = 14
        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(constructor.__name__, weight_count))
        assert (weight_count == expected_counts[constructor])
| 13,783 | 32.784314 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/hrnet.py | """
HRNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
"""
__all__ = ['HRNet', 'hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32',
'hrnetv2_w40', 'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, Identity, SimpleSequential, flatten, is_channels_first
from .resnet import ResUnit
class UpSamplingBlock(nn.Layer):
    """
    HRNet-specific upsampling block: a linear 1x1 conv that adapts channel
    count, followed by nearest-neighbor spatial upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(UpSamplingBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.conv = conv1x1_block(
            strides=1,
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv")
        self.upsample = nn.UpSampling2D(
            size=scale_factor,
            interpolation="nearest",
            data_format=data_format,
            name="upsample")

    def call(self, x, training=None):
        out = self.conv(x, training=training)
        return self.upsample(out)
class HRBlock(nn.Layer):
    """
    HFNet block.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblock.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_branches,
                 num_subblocks,
                 data_format="channels_last",
                 **kwargs):
        super(HRBlock, self).__init__(**kwargs)
        self.in_channels_list = in_channels_list
        self.num_branches = num_branches
        # One residual-unit chain per resolution branch.
        self.branches = SimpleSequential(name="branches")
        for i in range(num_branches):
            layers = SimpleSequential(name="branches/branch{}".format(i + 1))
            in_channels_i = self.in_channels_list[i]
            out_channels_i = out_channels_list[i]
            for j in range(num_subblocks[i]):
                layers.add(ResUnit(
                    in_channels=in_channels_i,
                    out_channels=out_channels_i,
                    strides=1,
                    bottleneck=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels_i = out_channels_i
            # NOTE: mutates the caller-supplied list; HRStage reads it back.
            self.in_channels_list[i] = out_channels_i
            self.branches.add(layers)
        if num_branches > 1:
            # fuse_layers[i][j] maps branch j's output into branch i's
            # resolution/width: upsample when j > i, identity when j == i,
            # a chain of strided 3x3 convs when j < i.
            self.fuse_layers = SimpleSequential(name="fuse_layers")
            for i in range(num_branches):
                fuse_layer_name = "fuse_layers/fuse_layer{}".format(i + 1)
                fuse_layer = SimpleSequential(name=fuse_layer_name)
                for j in range(num_branches):
                    if j > i:
                        fuse_layer.add(UpSamplingBlock(
                            in_channels=in_channels_list[j],
                            out_channels=in_channels_list[i],
                            scale_factor=2 ** (j - i),
                            data_format=data_format,
                            name=fuse_layer_name + "/block{}".format(j + 1)))
                    elif j == i:
                        fuse_layer.add(Identity(name=fuse_layer_name + "/block{}".format(j + 1)))
                    else:
                        conv3x3_seq_name = fuse_layer_name + "/block{}_conv3x3_seq".format(j + 1)
                        conv3x3_seq = SimpleSequential(name=conv3x3_seq_name)
                        # i - j strided convs; only the last changes width and
                        # drops the activation.
                        for k in range(i - j):
                            if k == i - j - 1:
                                conv3x3_seq.add(conv3x3_block(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[i],
                                    strides=2,
                                    activation=None,
                                    data_format=data_format,
                                    name="subblock{}".format(k + 1)))
                            else:
                                conv3x3_seq.add(conv3x3_block(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[j],
                                    strides=2,
                                    data_format=data_format,
                                    name="subblock{}".format(k + 1)))
                        fuse_layer.add(conv3x3_seq)
                self.fuse_layers.add(fuse_layer)
            self.activ = nn.ReLU()

    def call(self, x, training=None):
        # `x` is a list of per-branch tensors; run each branch in place.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i], training=training)
        if self.num_branches == 1:
            return x
        # Fuse: every output i sums contributions from all branches j.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0], training=training)
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j], training=training)
            x_fuse.append(self.activ(y))
        return x_fuse
class HRStage(nn.Layer):
    """
    HRNet stage block.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of output channels from the previous layer.
    out_channels_list : list of int
        Number of output channels in the current layer.
    num_modules : int
        Number of modules.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblocks.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_modules,
                 num_branches,
                 num_subblocks,
                 data_format="channels_last",
                 **kwargs):
        super(HRStage, self).__init__(**kwargs)
        self.branches = num_branches
        self.in_channels_list = out_channels_list
        in_branches = len(in_channels_list)
        out_branches = len(out_channels_list)
        # Transition: adapt each incoming branch to this stage's width, and
        # create any new (lower-resolution) branches from the last input branch.
        self.transition = SimpleSequential(name="transition")
        for i in range(out_branches):
            if i < in_branches:
                if out_channels_list[i] != in_channels_list[i]:
                    self.transition.add(conv3x3_block(
                        in_channels=in_channels_list[i],
                        out_channels=out_channels_list[i],
                        strides=1,
                        data_format=data_format,
                        name="transition/block{}".format(i + 1)))
                else:
                    self.transition.add(Identity(name="transition/block{}".format(i + 1)))
            else:
                # New branch: downsample the deepest input with strided convs.
                conv3x3_seq = SimpleSequential(name="transition/conv3x3_seq{}".format(i + 1))
                for j in range(i + 1 - in_branches):
                    in_channels_i = in_channels_list[-1]
                    # Only the final conv in the chain changes the width.
                    out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                    conv3x3_seq.add(conv3x3_block(
                        in_channels=in_channels_i,
                        out_channels=out_channels_i,
                        strides=2,
                        data_format=data_format,
                        name="subblock{}".format(j + 1)))
                self.transition.add(conv3x3_seq)
        # The stage body is a sequence of HRBlock modules.
        self.layers = SimpleSequential(name="layers")
        for i in range(num_modules):
            self.layers.add(HRBlock(
                in_channels_list=self.in_channels_list,
                out_channels_list=out_channels_list,
                num_branches=num_branches,
                num_subblocks=num_subblocks,
                data_format=data_format,
                name="block{}".format(i + 1)))
            # HRBlock updates its in_channels_list in place; keep our copy current.
            self.in_channels_list = list(self.layers[-1].in_channels_list)

    def call(self, x, training=None):
        # `x` is either a single tensor (first stage) or a list of branch tensors.
        x_list = []
        for j in range(self.branches):
            if not isinstance(self.transition[j], Identity):
                # New/adapted branches are fed from the last input branch.
                x_list.append(self.transition[j](x[-1] if type(x) in (list, tuple) else x, training=training))
            else:
                x_list_j = x[j] if type(x) in (list, tuple) else x
                x_list.append(x_list_j)
        y_list = self.layers(x_list, training=training)
        return y_list
class HRInitBlock(nn.Layer):
    """
    HRNet stem: two strided 3x3 conv blocks (4x spatial reduction) followed
    by a chain of bottleneck residual units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    num_subblocks : int
        Number of subblocks.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 num_subblocks,
                 data_format="channels_last",
                 **kwargs):
        super(HRInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            strides=2,
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            strides=2,
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        self.subblocks = SimpleSequential(name="subblocks")
        current_channels = mid_channels
        for idx in range(num_subblocks):
            # Only the first unit changes the channel count.
            self.subblocks.add(ResUnit(
                in_channels=current_channels,
                out_channels=out_channels,
                strides=1,
                bottleneck=True,
                data_format=data_format,
                name="block{}".format(idx + 1)))
            current_channels = out_channels

    def call(self, x, training=None):
        out = self.conv1(x, training=training)
        out = self.conv2(out, training=training)
        return self.subblocks(out, training=training)
class HRFinalBlock(nn.Layer):
    """
    HRNet final block: widen every branch with a bottleneck unit, then fold
    the branches top-down via strided convs and additions, ending with a
    1x1 conv to 2048 channels.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels per stage.
    out_channels_list : list of int
        Number of output channels per stage.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 data_format="channels_last",
                 **kwargs):
        super(HRFinalBlock, self).__init__(**kwargs)
        # Per-branch channel-increasing bottleneck units.
        self.inc_blocks = SimpleSequential(name="inc_blocks")
        for idx, branch_in_channels in enumerate(in_channels_list):
            self.inc_blocks.add(ResUnit(
                in_channels=branch_in_channels,
                out_channels=out_channels_list[idx],
                strides=1,
                bottleneck=True,
                data_format=data_format,
                name="inc_blocks/block{}".format(idx + 1)))
        # Strided convs carrying the running sum down one resolution level.
        self.down_blocks = SimpleSequential(name="down_blocks")
        for idx in range(len(in_channels_list) - 1):
            self.down_blocks.add(conv3x3_block(
                in_channels=out_channels_list[idx],
                out_channels=out_channels_list[idx + 1],
                strides=2,
                use_bias=True,
                data_format=data_format,
                name="down_blocks/block{}".format(idx + 1)))
        self.final_layer = conv1x1_block(
            in_channels=1024,
            out_channels=2048,
            strides=1,
            use_bias=True,
            data_format=data_format,
            name="final_layer")

    def call(self, x, training=None):
        # Accumulate top-down: widen branch k+1 and add the downsampled sum.
        out = self.inc_blocks[0](x[0], training=training)
        for k in range(len(self.down_blocks)):
            out = self.inc_blocks[k + 1](x[k + 1], training=training) + self.down_blocks[k](out, training=training)
        return self.final_layer(out, training=training)
class HRNet(tf.keras.Model):
    """
    HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    init_num_subblocks : int
        Number of subblocks in the initial unit.
    num_modules : int
        Number of modules per stage.
    num_subblocks : list of int
        Number of subblocks per stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 init_num_subblocks,
                 num_modules,
                 num_subblocks,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(HRNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Number of parallel resolution branches in stages 1..3.
        self.branches = [2, 3, 4]
        self.features = SimpleSequential(name="features")
        self.features.add(HRInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            mid_channels=64,
            num_subblocks=init_num_subblocks,
            data_format=data_format,
            name="init_block"))
        in_channels_list = [init_block_channels]
        for i in range(len(self.branches)):
            self.features.add(HRStage(
                in_channels_list=in_channels_list,
                out_channels_list=channels[i],
                num_modules=num_modules[i],
                num_branches=self.branches[i],
                num_subblocks=num_subblocks[i],
                data_format=data_format,
                name="stage{}".format(i + 1)))
            # Each stage publishes the channel widths its branches end with.
            in_channels_list = self.features[-1].in_channels_list
        # Merge branches and expand to 2048 channels.
        self.features.add(HRFinalBlock(
            in_channels_list=in_channels_list,
            out_channels_list=[128, 256, 512, 1024],
            data_format=data_format,
            name="final_block"))
        # 7x7 average pooling assumes a 224x224 input (7 = 224 / 32).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classification head.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=2048,
            name="output1")

    def call(self, x, training=None):
        """Compute class logits of shape (batch, classes) for input images."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_hrnet(version,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create HRNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of HRNet ('w18s1', 'w18s2', 'w18', 'w30', 'w32', 'w40', 'w44', 'w48' or 'w64').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HRNet
        The constructed (and optionally weight-loaded) network.

    Raises:
    ------
    ValueError
        If `version` is unknown, or `pretrained` is requested without a `model_name`.
    """
    # Per-version configuration:
    # (init_block_channels, init_num_subblocks, base_width, num_modules).
    # Stage channels always follow the doubling pattern
    # [[w, 2w], [w, 2w, 4w], [w, 2w, 4w, 8w]] for base width w.
    configs = {
        "w18s1": (128, 1, 16, [1, 1, 1]),
        "w18s2": (256, 2, 18, [1, 3, 2]),
        "w18": (256, 4, 18, [1, 4, 3]),
        "w30": (256, 4, 30, [1, 4, 3]),
        "w32": (256, 4, 32, [1, 4, 3]),
        "w40": (256, 4, 40, [1, 4, 3]),
        "w44": (256, 4, 44, [1, 4, 3]),
        "w48": (256, 4, 48, [1, 4, 3]),
        "w64": (256, 4, 64, [1, 4, 3]),
    }
    if version not in configs:
        raise ValueError("Unsupported HRNet version {}".format(version))
    init_block_channels, init_num_subblocks, base_width, num_modules = configs[version]
    channels = [[base_width * (2 ** j) for j in range(i + 2)] for i in range(3)]
    # Every unit in a stage uses the same subblock count, never fewer than 2.
    num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels]

    net = HRNet(
        channels=channels,
        init_block_channels=init_block_channels,
        init_num_subblocks=init_num_subblocks,
        num_modules=num_modules,
        num_subblocks=num_subblocks,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # The model must be built with a concrete input shape before weights load.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def hrnet_w18_small_v1(**kwargs):
    """
    Build the HRNet-W18 Small V1 classifier from 'Deep High-Resolution Representation Learning for
    Visual Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default
    False) and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w18s1", model_name="hrnet_w18_small_v1", **kwargs)


def hrnet_w18_small_v2(**kwargs):
    """
    Build the HRNet-W18 Small V2 classifier from 'Deep High-Resolution Representation Learning for
    Visual Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default
    False) and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w18s2", model_name="hrnet_w18_small_v2", **kwargs)


def hrnetv2_w18(**kwargs):
    """
    Build the HRNetV2-W18 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w18", model_name="hrnetv2_w18", **kwargs)


def hrnetv2_w30(**kwargs):
    """
    Build the HRNetV2-W30 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w30", model_name="hrnetv2_w30", **kwargs)


def hrnetv2_w32(**kwargs):
    """
    Build the HRNetV2-W32 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)


def hrnetv2_w40(**kwargs):
    """
    Build the HRNetV2-W40 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w40", model_name="hrnetv2_w40", **kwargs)


def hrnetv2_w44(**kwargs):
    """
    Build the HRNetV2-W44 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w44", model_name="hrnetv2_w44", **kwargs)


def hrnetv2_w48(**kwargs):
    """
    Build the HRNetV2-W48 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w48", model_name="hrnetv2_w48", **kwargs)


def hrnetv2_w64(**kwargs):
    """
    Build the HRNetV2-W64 classifier from 'Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919. Accepts ``pretrained`` (bool, default False)
    and ``root`` (str, default '~/.tensorflow/models') keyword arguments.
    """
    return get_hrnet(version="w64", model_name="hrnetv2_w64", **kwargs)
def _test():
    """Smoke-test each HRNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # Reference trainable-parameter count for every constructor.
    expected_weight_counts = {
        hrnet_w18_small_v1: 13187464,
        hrnet_w18_small_v2: 15597464,
        hrnetv2_w18: 21299004,
        hrnetv2_w30: 37712220,
        hrnetv2_w32: 41232680,
        hrnetv2_w40: 57557160,
        hrnetv2_w44: 67064984,
        hrnetv2_w48: 77469864,
        hrnetv2_w64: 128059944,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 25,313 | 34.703808 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fcn8sd.py | """
FCN-8s(d) for image segmentation, implemented in TensorFlow.
Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
"""
__all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco',
'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k',
'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv3x3_block, is_channels_first, interpolate_im, get_im_size
from .resnetd import resnetd50b, resnetd101b
class FCNFinalBlock(nn.Layer):
    """
    Head block of FCN-8s(d): a 3x3 channel-reducing convolution, dropout, a 1x1 class
    projection, and resizing of the logits to the requested spatial size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Channel reduction factor applied by the first convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(FCNFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        squeezed_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=squeezed_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=squeezed_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, out_size, training=None):
        y = self.conv1(x, training=training)
        y = self.dropout(y, training=training)
        y = self.conv2(y)
        # Rescale the per-class maps to the caller-supplied output size.
        return interpolate_im(y, out_size=out_size, data_format=self.data_format)
class FCN8sd(tf.keras.Model):
    """
    FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,'
    https://arxiv.org/abs/1411.4038. It is an experimental model mixed FCN-8s and PSPNet.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor. When called it must return a pair of tensors
        (final features, intermediate features for the auxiliary head).
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 data_format="channels_last",
                 **kwargs):
        super(FCN8sd, self).__init__(**kwargs)
        # `in_channels` is only sanity-checked; the backbone fixes the real input depth.
        assert (in_channels > 0)
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format

        self.backbone = backbone
        pool_out_channels = backbone_out_channels
        # Main segmentation head over the backbone's final features.
        self.final_block = FCNFinalBlock(
            in_channels=pool_out_channels,
            out_channels=classes,
            data_format=data_format,
            name="final_block")
        if self.aux:
            # Auxiliary head over the intermediate features, which carry half as
            # many channels as the final ones.
            aux_out_channels = backbone_out_channels // 2
            self.aux_block = FCNFinalBlock(
                in_channels=aux_out_channels,
                out_channels=classes,
                data_format=data_format,
                name="aux_block")

    def call(self, x, training=None):
        # With `fixed_size` the logits are upsampled to the configured `in_size`;
        # otherwise the actual spatial size of the input tensor is measured.
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        x, y = self.backbone(x, training=training)
        x = self.final_block(x, in_size, training=training)
        if self.aux:
            y = self.aux_block(y, in_size, training=training)
            # Returns (main prediction, auxiliary prediction) when aux is enabled.
            return x, y
        else:
            return x
def get_fcn8sd(backbone,
               classes,
               aux=False,
               model_name=None,
               data_format="channels_last",
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Assemble an FCN-8s(d) network and optionally restore pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = FCN8sd(
        backbone=backbone,
        classes=classes,
        aux=aux,
        data_format=data_format,
        **kwargs)
    if not pretrained:
        return net

    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    in_channels = kwargs.get("in_channels", 3)
    # The model must be built with a concrete input shape before weights can load.
    if net.data_format == "channels_first":
        input_shape = (1, in_channels) + net.in_size
    else:
        input_shape = (1,) + net.in_size + (in_channels,)
    net.build(input_shape=input_shape)
    net.load_weights(
        filepath=get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root),
        by_name=True,
        skip_mismatch=True)
    return net
def fcn8sd_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b trunk for Pascal VOC (21 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                       data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_voc",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b trunk for Pascal VOC (21 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                        data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_voc",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b trunk for COCO (21 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                       data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_coco",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b trunk for COCO (21 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                        data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_coco",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b trunk for ADE20K (150 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                       data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_ade20k",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last", **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b trunk for ADE20K (150 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                        data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_ade20k",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last",
                                 **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-50b trunk for Cityscapes (19 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                       data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_cityscapes",
                      data_format=data_format, **kwargs)


def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last",
                                  **kwargs):
    """
    FCN-8s(d) with a ResNet(D)-101b trunk for Cityscapes (19 classes), from 'Fully Convolutional
    Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038. Also accepts
    ``pretrained`` and ``root`` keyword arguments for weight loading.
    """
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
                        data_format=data_format).features
    del trunk.children[-1]  # the trunk's last child module is stripped before use
    return get_fcn8sd(backbone=trunk, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_cityscapes",
                      data_format=data_format, **kwargs)
def _test():
    """Smoke-test every FCN-8s(d) variant: output shape plus trainable-weight counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (480, 480)
    aux = False
    pretrained = False

    # (constructor, number of segmentation classes) pairs.
    models = [
        (fcn8sd_resnetd50b_voc, 21),
        (fcn8sd_resnetd101b_voc, 21),
        (fcn8sd_resnetd50b_coco, 21),
        (fcn8sd_resnetd101b_coco, 21),
        (fcn8sd_resnetd50b_ade20k, 150),
        (fcn8sd_resnetd101b_ade20k, 150),
        (fcn8sd_resnetd50b_cityscapes, 19),
        (fcn8sd_resnetd101b_cityscapes, 19),
    ]

    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else
                             (batch, in_size[0], in_size[1], 3))
        ys = net(x)
        # With aux=True the net returns (main, aux); otherwise a single tensor.
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts depend on whether the auxiliary head exists.
        if aux:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 35545324)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 54537452)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 35444454)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 54436582)
        else:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 33146966)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 52139094)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 33079763)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 52071891)


if __name__ == "__main__":
    _test()
| 19,136 | 40.154839 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/selecsls.py | """
SelecSLS for ImageNet-1K, implemented in TensorFlow.
Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
"""
__all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, DualPathSequential, AvgPool2d, SimpleSequential, flatten,\
is_channels_first, get_channel_axis
class SelecSLSBlock(nn.Layer):
    """
    Basic SelecSLS building block: a 1x1 expansion convolution followed by a 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(SelecSLSBlock, self).__init__(**kwargs)
        # The hidden width is twice the output width.
        expanded_channels = 2 * out_channels

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=expanded_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=expanded_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x, training=training), training=training)
class SelecSLSUnit(nn.Layer):
    """
    SelecSLS unit. Carries a dual-path signature: `call` takes and returns a pair
    (features, skip tensor).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    skip_channels : int
        Number of skipped channels.
    mid_channels : int
        Number of middle channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 skip_channels,
                 mid_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(SelecSLSUnit, self).__init__(**kwargs)
        self.data_format = data_format
        # A strided unit starts a new resolution level and refreshes the skip tensor.
        self.resize = (strides == 2)
        mid2_channels = mid_channels // 2
        # Fusing conv input: branch1 (mid) + branch2 (mid/2) + branch3 (mid/2) = 2*mid
        # channels, plus the skip tensor when there is no downsampling.
        last_channels = 2 * mid_channels + (skip_channels if strides == 1 else 0)

        self.branch1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="branch1")
        self.branch2 = SelecSLSBlock(
            in_channels=mid_channels,
            out_channels=mid2_channels,
            data_format=data_format,
            name="branch2")
        self.branch3 = SelecSLSBlock(
            in_channels=mid2_channels,
            out_channels=mid2_channels,
            data_format=data_format,
            name="branch3")
        self.last_conv = conv1x1_block(
            in_channels=last_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="last_conv")

    def call(self, x, x0=None, training=None):
        x1 = self.branch1(x, training=training)
        x2 = self.branch2(x1, training=training)
        x3 = self.branch3(x2, training=training)
        if self.resize:
            # Downsampling unit: fuse only its own branches and return the fused
            # tensor twice — it becomes the skip input for subsequent units.
            y = tf.concat([x1, x2, x3], axis=get_channel_axis(self.data_format))
            y = self.last_conv(y, training=training)
            return y, y
        else:
            # Stride-1 unit: additionally concatenate the running skip tensor x0
            # and pass it through unchanged.
            y = tf.concat([x1, x2, x3, x0], axis=get_channel_axis(self.data_format))
            y = self.last_conv(y, training=training)
            return y, x0
class SelecSLS(tf.keras.Model):
    """
    SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit. The first len(skip_channels) stages
        are SelecSLS stages; the remaining entries describe plain head stages.
    skip_channels : list of list of int
        Number of skipped channels for each unit.
    mid_channels : list of list of int
        Number of middle channels for each unit.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 1x1) kernel for each head unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 skip_channels,
                 mid_channels,
                 kernels3,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SelecSLS, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        init_block_channels = 32

        # SelecSLS units carry a (features, skip) pair through the backbone; the
        # ordinal prefix/suffix counts tell DualPathSequential which modules are
        # plain single-input ones (the stem and the head stages + pooling).
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=(1 + len(kernels3)),
            name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            # k < 0: SelecSLS (dual-path) stage; k >= 0: plain head stage whose
            # kernel size is selected by kernels3[k].
            k = i - len(skip_channels)
            stage = DualPathSequential(name="stage{}".format(i + 1)) if k < 0 else\
                SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Only the first unit of each stage downsamples.
                strides = 2 if j == 0 else 1
                if k < 0:
                    unit = SelecSLSUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        skip_channels=skip_channels[i][j],
                        mid_channels=mid_channels[i][j],
                        strides=strides,
                        data_format=data_format,
                        name="unit{}".format(j + 1))
                else:
                    conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block
                    unit = conv_block_class(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        data_format=data_format,
                        name="unit{}".format(j + 1))
                stage.add(unit)
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(AvgPool2d(
            pool_size=4,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_selecsls(version,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create SelecSLS model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SelecSLS ('42', '42b', '60', '60b' or '84').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Map each accepted version onto its backbone family and a flag for the
    # alternative ("b") head layout.
    families = {
        "42": ("42", False),
        "42b": ("42", True),
        "60": ("60", False),
        "60b": ("60", True),
        "84": ("84", False),
    }
    if version not in families:
        raise ValueError("Unsupported SelecSLS version {}".format(version))
    family, b_head = families[version]

    if family == "42":
        channels = [[64, 128], [144, 288], [304, 480]]
        skip_channels = [[0, 64], [0, 144], [0, 304]]
        mid_channels = [[64, 64], [144, 144], [304, 304]]
    elif family == "60":
        channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]]
        skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]]
        mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]]
    else:  # family == "84"
        channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]]
        skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]]
        mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]]

    kernels3 = [[1, 1], [1, 1]] if family == "84" else [[1, 1], [1, 0]]
    first_head = [756, 1024] if family == "60" else [960, 1024]
    second_head = [1280, 1024] if b_head else [1024, 1280]
    channels = channels + [first_head, second_head]

    net = SelecSLS(
        channels=channels,
        skip_channels=skip_channels,
        mid_channels=mid_channels,
        kernels3=kernels3,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def selecsls42(**kwargs):
"""
SelecSLS-42 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="42", model_name="selecsls42", **kwargs)
def selecsls42b(**kwargs):
    """
    SelecSLS-42b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    version = "42b"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def selecsls60(**kwargs):
    """
    SelecSLS-60 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    version = "60"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def selecsls60b(**kwargs):
    """
    SelecSLS-60b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    version = "60b"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def selecsls84(**kwargs):
    """
    SelecSLS-84 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    version = "84"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def _test():
    """Smoke-test every SelecSLS variant: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Each model factory paired with its expected trainable-parameter count.
    models = [
        (selecsls42, 30354952),
        (selecsls42b, 32458248),
        (selecsls60, 30670768),
        (selecsls60b, 32774064),
        (selecsls84, 50954600),
    ]

    for model, expected_weight_count in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        input_shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        x = tf.random.normal(input_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)


if __name__ == "__main__":
    _test()
| 13,913 | 33.698254 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/inceptionv4.py | """
InceptionV4 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import ConvBlock, conv3x3_block, SimpleSequential, Concurrent, flatten, is_channels_first, get_channel_axis
from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
class Conv3x3Branch(nn.Layer):
    """
    InceptionV4 specific convolutional 3x3 branch block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(Conv3x3Branch, self).__init__(**kwargs)
        # Stride-2, unpadded ("valid") 3x3 convolution: reduces spatial resolution.
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        """Apply the strided 3x3 conv block (`training` toggles Batch norm mode)."""
        x = self.conv(x, training=training)
        return x
class ConvSeq3x3Branch(nn.Layer):
    """
    InceptionV4 specific convolutional sequence branch block with splitting by 3x3.

    Note: the block outputs 2 * `out_channels` channels, since the parallel
    1x3 and 3x1 convolutions are concatenated in `call`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (per parallel asymmetric convolution).
    mid_channels_list : list of tuple of int
        List of numbers of output channels for middle layers.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ConvSeq3x3Branch, self).__init__(**kwargs)
        self.data_format = data_format
        # Preliminary stack of conv blocks; each layer's config comes from the zipped lists.
        self.conv_list = SimpleSequential(name="conv_list")
        for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
                mid_channels_list, kernel_size_list, strides_list, padding_list)):
            self.conv_list.children.append(ConvBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                bn_eps=bn_eps,
                data_format=data_format,
                name="conv{}".format(i + 1)))
            in_channels = mid_channels
        # Two parallel asymmetric convolutions over the stack's output;
        # their results are concatenated channel-wise in call().
        self.conv1x3 = ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(1, 3),
            strides=1,
            padding=(0, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1x3")
        self.conv3x1 = ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(3, 1),
            strides=1,
            padding=(1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3x1")

    def call(self, x, training=None):
        """Run the conv stack, then concatenate the 1x3 and 3x1 outputs along the channel axis."""
        x = self.conv_list(x, training=training)
        y1 = self.conv1x3(x, training=training)
        y2 = self.conv3x1(x, training=training)
        x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        return x
class InceptionAUnit(nn.Layer):
    """
    InceptionV4 type Inception-A unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        in_channels = 384
        # Four parallel branches producing 96 channels each; concatenation
        # restores the 384-channel input width, so the unit is shape-preserving.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96, 96),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps,
            count_include_pad=False,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        """Run all branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class ReductionAUnit(nn.Layer):
    """
    InceptionV4 type Reduction-A unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        in_channels = 384
        # Three parallel downsampling branches: strided 3x3 conv (384 ch),
        # a 1x1-3x3-3x3 stack ending with stride 2 (256 ch), and max pooling
        # (passes the 384 input channels through) -> 1024 channels total.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(384,),
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        """Run all branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptionBUnit(nn.Layer):
    """
    InceptionV4 type Inception-B unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        in_channels = 1024
        # Branch outputs: 384 + 256 + 256 + 128 = 1024 channels, so the unit
        # preserves the input width. Branches 2 and 3 use factorized 1x7/7x1 convs.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=384,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 224, 256),
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192, 224, 224, 256),
            kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
            strides_list=(1, 1, 1, 1, 1),
            padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=128,
            bn_eps=bn_eps,
            count_include_pad=False,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        """Run all branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class ReductionBUnit(nn.Layer):
    """
    InceptionV4 type Reduction-B unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        in_channels = 1024
        # Downsampling branches: 192 + 320 + 1024 (pooled input) = 1536 channels.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(192, 192),
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(256, 256, 320, 320),
            kernel_size_list=(1, (1, 7), (7, 1), 3),
            strides_list=(1, 1, 1, 2),
            padding_list=(0, (0, 3), (3, 0), 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        """Run all branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptionCUnit(nn.Layer):
    """
    InceptionV4 type Inception-C unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        in_channels = 1536
        # Branch outputs: 256 + 2*256 + 2*256 + 256 = 1536 channels
        # (ConvSeq3x3Branch doubles its out_channels by concatenating its
        # parallel 1x3 and 3x1 convolutions), so the unit is shape-preserving.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=256,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels=256,
            mid_channels_list=(384,),
            kernel_size_list=(1,),
            strides_list=(1,),
            padding_list=(0,),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeq3x3Branch(
            in_channels=in_channels,
            out_channels=256,
            mid_channels_list=(384, 448, 512),
            kernel_size_list=(1, (3, 1), (1, 3)),
            strides_list=(1, 1, 1),
            padding_list=(0, (1, 0), (0, 1)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=256,
            bn_eps=bn_eps,
            count_include_pad=False,
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        """Run all branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptBlock3a(nn.Layer):
    """
    InceptionV4 type Mixed-3a block.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptBlock3a, self).__init__(**kwargs)
        # Parallel downsampling: max pooling (keeps 64 input channels) and a
        # strided 3x3 conv (96 channels) -> 160 channels total.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(Conv3x3Branch(
            in_channels=64,
            out_channels=96,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        """Run both branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptBlock4a(nn.Layer):
    """
    InceptionV4 type Mixed-4a block.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptBlock4a, self).__init__(**kwargs)
        # Two branches each ending in a 96-channel unpadded 3x3 conv
        # -> 192 channels total; branch 2 adds factorized 1x7/7x1 convs.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 96),
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=160,
            out_channels_list=(64, 64, 64, 96),
            kernel_size_list=(1, (1, 7), (7, 1), 3),
            strides_list=(1, 1, 1, 1),
            padding_list=(0, (0, 3), (3, 0), 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        """Run both branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptBlock5a(nn.Layer):
    """
    InceptionV4 type Mixed-5a block.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptBlock5a, self).__init__(**kwargs)
        # Parallel downsampling: strided 3x3 conv (192 channels) and max
        # pooling (keeps 192 input channels) -> 384 channels total.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv3x3Branch(
            in_channels=192,
            out_channels=192,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch2"))

    def call(self, x, training=None):
        """Run both branches in parallel and concatenate their outputs."""
        x = self.branches(x, training=training)
        return x
class InceptInitBlock(nn.Layer):
    """
    InceptionV4 specific initial block (the network stem).

    Produces a 384-channel feature map (output width of InceptBlock5a).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        # Stride-2 valid conv: first spatial reduction.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            strides=1,
            padding=1,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3")
        # Mixed-3a/4a/5a blocks: 64 -> 160 -> 192 -> 384 channels.
        self.block1 = InceptBlock3a(
            bn_eps=bn_eps,
            data_format=data_format,
            name="block1")
        self.block2 = InceptBlock4a(
            bn_eps=bn_eps,
            data_format=data_format,
            name="block2")
        self.block3 = InceptBlock5a(
            bn_eps=bn_eps,
            data_format=data_format,
            name="block3")

    def call(self, x, training=None):
        """Apply the stem convs and the three mixed blocks in sequence."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.block1(x, training=training)
        x = self.block2(x, training=training)
        x = self.block3(x, training=training)
        return x
class InceptionV4(tf.keras.Model):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionV4, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Three stages of A/B/C inception units; each later stage is opened
        # by the corresponding reduction unit.
        layers = [4, 8, 4]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = SimpleSequential(name="features")
        self.features.add(InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))

        for i, layers_per_stage in enumerate(layers):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j in range(layers_per_stage):
                # The first unit of every stage after the first is a reduction unit.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                else:
                    unit = normal_units[i]
                stage.add(unit(
                    bn_eps=bn_eps,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classifier head: optional dropout followed by a fully-connected layer.
        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="output1/dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=1536,
            name="output1/fc"))

    def call(self, x, training=None):
        """
        Forward pass: backbone features, flatten, classifier head.

        Parameters:
        ----------
        x : Tensor
            Input image batch.
        training : bool or None, default None
            Whether to run in training mode (affects Batch norm and Dropout).
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        # Propagate `training` explicitly so the optional Dropout in the head
        # follows the caller's flag, consistent with the `features` call above.
        x = self.output1(x, training=training)
        return x
def get_inceptionv4(model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create InceptionV4 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = InceptionV4(**kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a dummy batch dimension of 1 so weights can be loaded.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_weights(filepath=weights_path)

    return net
def inceptionv4(**kwargs):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    defaults = {"model_name": "inceptionv4", "bn_eps": 1e-3}
    return get_inceptionv4(**defaults, **kwargs)
def _test():
    """Smoke-test InceptionV4: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model in (inceptionv4,):
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        input_shape = (batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3)
        x = tf.random.normal(input_shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionv4 or weight_count == 42679816)


if __name__ == "__main__":
    _test()
| 23,613 | 31.303694 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/regnet.py | """
RegNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040',
'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004',
'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080',
'regnety120', 'regnety160', 'regnety320']
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SEBlock, SimpleSequential, is_channels_first
class RegNetBottleneck(nn.Layer):
    """
    RegNet bottleneck block for residual path in RegNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    bottleneck_factor : int, default 1
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups,
                 use_se,
                 bottleneck_factor=1,
                 data_format="channels_last",
                 **kwargs):
        super(RegNetBottleneck, self).__init__(**kwargs)
        self.use_se = use_se
        # With the default bottleneck_factor of 1, mid_channels == out_channels.
        mid_channels = out_channels // bottleneck_factor
        # NOTE(review): `groups` here acts as a group width -- the grouped conv
        # below is given mid_channels // groups groups (see get_regnet).
        mid_groups = mid_channels // groups

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            groups=mid_groups,
            data_format=data_format,
            name="conv2")
        if self.use_se:
            # Squeeze-and-excitation applied between the 3x3 and final 1x1 convs.
            self.se = SEBlock(
                channels=mid_channels,
                mid_channels=(in_channels // 4),
                data_format=data_format,
                name="se")
        # Final projection has no activation; the enclosing unit applies ReLU
        # after the residual addition.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        """Apply 1x1 -> grouped 3x3 -> (optional SE) -> 1x1 conv sequence."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x, training=training)
        return x
class RegNetUnit(nn.Layer):
    """
    RegNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(RegNetUnit, self).__init__(**kwargs)
        # A 1x1 projection shortcut is required whenever the residual branch
        # changes the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = RegNetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            groups=groups,
            use_se=use_se,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        """Residual forward pass: body(x) + (optionally projected) identity, then ReLU."""
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        x = self.body(x, training=training)
        return self.activ(x + identity)
class RegNet(tf.keras.Model):
    """
    RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : list of int
        Number of groups for each stage.
    use_se : bool
        Whether to use SE-module.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(RegNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        # Stem: a single stride-2 3x3 conv.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            padding=1,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # Stages of RegNet units; each stage downsamples once via its first unit.
        for i, (channels_per_stage, groups_per_stage) in enumerate(zip(channels, groups)):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                stride = 2 if (j == 0) else 1
                stage.add(RegNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=stride,
                    groups=groups_per_stage,
                    use_se=use_se,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        # Classifier: single fully-connected layer on the pooled features.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Forward pass: backbone features (ending in global pooling), then classifier."""
        x = self.features(x, training=training)
        x = self.output1(x)
        return x
def get_regnet(channels_init,
               channels_slope,
               channels_mult,
               depth,
               groups,
               use_se=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create RegNet model with specific parameters.

    Parameters:
    ----------
    channels_init : float
        Initial value for channels/widths (w_0).
    channels_slope : float
        Slope value for channels/widths (w_a).
    channels_mult : float
        Width multiplier value (w_m).
    depth : int
        Depth value (total number of units).
    groups : int
        Number of groups (group width).
    use_se : bool, default False
        Whether to use SE-module.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    divisor = 8
    assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0)

    # Generate continuous per-block channels/widths:
    channels_cont = np.arange(depth) * channels_slope + channels_init
    # Generate quantized per-block channels/widths:
    channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
    channels = channels_init * np.power(channels_mult, channels_exps)
    # Use the builtin `int` dtype: the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    channels = (np.round(channels / divisor) * divisor).astype(int)
    # Generate per stage channels/widths and layers/depths:
    channels_per_stage, layers = np.unique(channels, return_counts=True)
    # Adjusts the compatibility of channels/widths and groups:
    groups_per_stage = [min(groups, c) for c in channels_per_stage]
    channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
    channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]
    init_block_channels = 32

    net = RegNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups_per_stage,
        use_se=use_se,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def regnetx002(**kwargs):
    """
    RegNetX-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 24,
        "channels_slope": 36.44,
        "channels_mult": 2.49,
        "depth": 13,
        "groups": 8,
    }
    return get_regnet(model_name="regnetx002", **config, **kwargs)
def regnetx004(**kwargs):
    """
    RegNetX-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 24,
        "channels_slope": 24.48,
        "channels_mult": 2.54,
        "depth": 22,
        "groups": 16,
    }
    return get_regnet(model_name="regnetx004", **config, **kwargs)
def regnetx006(**kwargs):
    """
    RegNetX-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 48,
        "channels_slope": 36.97,
        "channels_mult": 2.24,
        "depth": 16,
        "groups": 24,
    }
    return get_regnet(model_name="regnetx006", **config, **kwargs)
def regnetx008(**kwargs):
    """
    RegNetX-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 56,
        "channels_slope": 35.73,
        "channels_mult": 2.28,
        "depth": 16,
        "groups": 16,
    }
    return get_regnet(model_name="regnetx008", **config, **kwargs)
def regnetx016(**kwargs):
    """
    RegNetX-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 80,
        "channels_slope": 34.01,
        "channels_mult": 2.25,
        "depth": 18,
        "groups": 24,
    }
    return get_regnet(model_name="regnetx016", **config, **kwargs)
def regnetx032(**kwargs):
    """
    RegNetX-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 88,
        "channels_slope": 26.31,
        "channels_mult": 2.25,
        "depth": 25,
        "groups": 48,
    }
    return get_regnet(model_name="regnetx032", **config, **kwargs)
def regnetx040(**kwargs):
    """
    RegNetX-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 96,
        "channels_slope": 38.65,
        "channels_mult": 2.43,
        "depth": 23,
        "groups": 40,
    }
    return get_regnet(model_name="regnetx040", **config, **kwargs)
def regnetx064(**kwargs):
    """
    RegNetX-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 184,
        "channels_slope": 60.83,
        "channels_mult": 2.07,
        "depth": 17,
        "groups": 56,
    }
    return get_regnet(model_name="regnetx064", **config, **kwargs)
def regnetx080(**kwargs):
    """
    RegNetX-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = {
        "channels_init": 80,
        "channels_slope": 49.56,
        "channels_mult": 2.88,
        "depth": 23,
        "groups": 120,
    }
    return get_regnet(model_name="regnetx080", **config, **kwargs)
def regnetx120(**kwargs):
    """
    Build the RegNetX-12GF model from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=168,
        channels_slope=73.36,
        channels_mult=2.37,
        depth=19,
        groups=112,
        model_name="regnetx120",
        **kwargs)
def regnetx160(**kwargs):
    """
    Build the RegNetX-16GF model from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=216,
        channels_slope=55.59,
        channels_mult=2.1,
        depth=22,
        groups=128,
        model_name="regnetx160",
        **kwargs)
def regnetx320(**kwargs):
    """
    Build the RegNetX-32GF model from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=320,
        channels_slope=69.86,
        channels_mult=2.0,
        depth=23,
        groups=168,
        model_name="regnetx320",
        **kwargs)
def regnety002(**kwargs):
    """
    Build the RegNetY-200MF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=24,
        channels_slope=36.44,
        channels_mult=2.49,
        depth=13,
        groups=8,
        use_se=True,
        model_name="regnety002",
        **kwargs)
def regnety004(**kwargs):
    """
    Build the RegNetY-400MF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=48,
        channels_slope=27.89,
        channels_mult=2.09,
        depth=16,
        groups=8,
        use_se=True,
        model_name="regnety004",
        **kwargs)
def regnety006(**kwargs):
    """
    Build the RegNetY-600MF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=48,
        channels_slope=32.54,
        channels_mult=2.32,
        depth=15,
        groups=16,
        use_se=True,
        model_name="regnety006",
        **kwargs)
def regnety008(**kwargs):
    """
    Build the RegNetY-800MF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=56,
        channels_slope=38.84,
        channels_mult=2.4,
        depth=14,
        groups=16,
        use_se=True,
        model_name="regnety008",
        **kwargs)
def regnety016(**kwargs):
    """
    Build the RegNetY-1.6GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=48,
        channels_slope=20.71,
        channels_mult=2.65,
        depth=27,
        groups=24,
        use_se=True,
        model_name="regnety016",
        **kwargs)
def regnety032(**kwargs):
    """
    Build the RegNetY-3.2GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=80,
        channels_slope=42.63,
        channels_mult=2.66,
        depth=21,
        groups=24,
        use_se=True,
        model_name="regnety032",
        **kwargs)
def regnety040(**kwargs):
    """
    Build the RegNetY-4.0GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=96,
        channels_slope=31.41,
        channels_mult=2.24,
        depth=22,
        groups=64,
        use_se=True,
        model_name="regnety040",
        **kwargs)
def regnety064(**kwargs):
    """
    Build the RegNetY-6.4GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=112,
        channels_slope=33.22,
        channels_mult=2.27,
        depth=25,
        groups=72,
        use_se=True,
        model_name="regnety064",
        **kwargs)
def regnety080(**kwargs):
    """
    Build the RegNetY-8.0GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=192,
        channels_slope=76.82,
        channels_mult=2.19,
        depth=17,
        groups=56,
        use_se=True,
        model_name="regnety080",
        **kwargs)
def regnety120(**kwargs):
    """
    Build the RegNetY-12GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=168,
        channels_slope=73.36,
        channels_mult=2.37,
        depth=19,
        groups=112,
        use_se=True,
        model_name="regnety120",
        **kwargs)
def regnety160(**kwargs):
    """
    Build the RegNetY-16GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=200,
        channels_slope=106.23,
        channels_mult=2.48,
        depth=18,
        groups=112,
        use_se=True,
        model_name="regnety160",
        **kwargs)
def regnety320(**kwargs):
    """
    Build the RegNetY-32GF model (RegNetX plus Squeeze-and-Excitation) from
    'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_regnet(
        channels_init=232,
        channels_slope=115.89,
        channels_mult=2.53,
        depth=20,
        groups=232,
        use_se=True,
        model_name="regnety320",
        **kwargs)
def _test():
    """Smoke-test every RegNet variant: forward a random batch and check the
    output shape and the trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Pairs of (model constructor, expected trainable parameter count).
    models = [
        (regnetx002, 2684792),
        (regnetx004, 5157512),
        (regnetx006, 6196040),
        (regnetx008, 7259656),
        (regnetx016, 9190136),
        (regnetx032, 15296552),
        (regnetx040, 22118248),
        (regnetx064, 26209256),
        (regnetx080, 39572648),
        (regnetx120, 46106056),
        (regnetx160, 54278536),
        (regnetx320, 107811560),
        (regnety002, 3162996),
        (regnety004, 4344144),
        (regnety006, 6055160),
        (regnety008, 6263168),
        (regnety016, 11202430),
        (regnety032, 19436338),
        (regnety040, 20646656),
        (regnety064, 30583252),
        (regnety080, 39180068),
        (regnety120, 51822544),
        (regnety160, 83590140),
        (regnety320, 145046770),
    ]

    for model, expected_count in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        size = 224
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, size, size))
        else:
            x = tf.random.normal((batch, size, size, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _test()
| 25,743 | 33.978261 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/icnet.py | """
ICNet for image segmentation, implemented in TensorFlow.
Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
"""
__all__ = ['ICNet', 'icnet_resnetd50b_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential, is_channels_first
from .pspnet import PyramidPooling
from .resnetd import resnetd50b
class ICInitBlock(nn.Layer):
    """
    ICNet initial (highest-resolution) branch: three successive stride-2 3x3
    convolution blocks that downscale the input by a total factor of 8.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ICInitBlock, self).__init__(**kwargs)
        mid_channels = out_channels // 2

        # Channel progression: in -> mid -> mid -> out, each conv halving the
        # spatial resolution.  Attribute and sub-layer names are kept stable
        # for checkpoint compatibility.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        """Apply the three downscaling conv blocks in order."""
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class PSPBlock(nn.Layer):
    """
    ICNet specific PSPNet reduced head block: pyramid pooling followed by a
    bottleneck 3x3 conv and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    bottleneck_factor : int
        Bottleneck factor (must evenly divide `in_channels`).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PSPBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        self.pool = PyramidPooling(
            in_channels=in_channels,
            upscale_out_size=upscale_out_size,
            data_format=data_format,
            name="pool")
        # NOTE(review): conv input width is hard-coded to 4096 rather than
        # derived from `in_channels`; presumably PyramidPooling concatenates
        # its branches onto a 2048-channel input (2048 * 2 = 4096) -- confirm
        # before reusing with another backbone width.
        self.conv = conv3x3_block(
            in_channels=4096,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")

    def call(self, x, training=None):
        """Pool, reduce channels, and apply dropout."""
        x = self.pool(x, training=training)
        x = self.conv(x, training=training)
        x = self.dropout(x, training=training)
        return x
class CFFBlock(nn.Layer):
    """
    Cascade Feature Fusion block: upsamples the low-resolution branch,
    aligns both branches to `out_channels`, sums them, and also emits an
    auxiliary class-score map from the low-resolution branch.

    Parameters:
    ----------
    in_channels_low : int
        Number of input channels (low-resolution input).
    in_channels_high : int
        Number of input channels (high-resolution input).
    out_channels : int
        Number of output channels.
    classes : int
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_low,
                 in_channels_high,
                 out_channels,
                 classes,
                 data_format="channels_last",
                 **kwargs):
        super(CFFBlock, self).__init__(**kwargs)
        self.up = InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up")
        # Dilated conv enlarges the receptive field of the upsampled branch.
        self.conv_low = conv3x3_block(
            in_channels=in_channels_low,
            out_channels=out_channels,
            padding=2,
            dilation=2,
            activation=None,
            data_format=data_format,
            name="conv_low")
        # NOTE: attribute name "conv_hign" is a historical misspelling of
        # "conv_high"; kept as-is for checkpoint compatibility.
        self.conv_hign = conv1x1_block(
            in_channels=in_channels_high,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv_hign")
        self.activ = nn.ReLU()
        self.conv_cls = conv1x1(
            in_channels=out_channels,
            out_channels=classes,
            data_format=data_format,
            name="conv_cls")

    def call(self, xl, xh, training=None):
        """Fuse low- (`xl`) and high-resolution (`xh`) features.

        Returns the fused feature map and an auxiliary class-score map
        computed from the processed low-resolution branch only (pre-fusion).
        """
        xl = self.up(xl)
        xl = self.conv_low(xl, training=training)
        xh = self.conv_hign(xh, training=training)
        x = xl + xh
        x = self.activ(x)
        # Auxiliary head reads the low branch, not the fused sum.
        x_cls = self.conv_cls(xl)
        return x, x_cls
class ICHeadBlock(nn.Layer):
    """
    ICNet head block: cascades two CFF fusions across the three branches and
    upsamples the result to produce class-score maps at several scales.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 classes,
                 data_format="channels_last",
                 **kwargs):
        super(ICHeadBlock, self).__init__(**kwargs)
        self.cff_12 = CFFBlock(
            in_channels_low=128,
            in_channels_high=64,
            out_channels=128,
            classes=classes,
            data_format=data_format,
            name="cff_12")
        self.cff_24 = CFFBlock(
            in_channels_low=256,
            in_channels_high=256,
            out_channels=128,
            classes=classes,
            data_format=data_format,
            name="cff_24")
        self.up_x2 = InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up_x2")
        # NOTE(review): named "up_x8" but built with scale_factor=4; the final
        # output is upscaled x4 from the x2-upsampled map (x8 total from the
        # cff_12 output) -- naming kept for checkpoint compatibility.
        self.up_x8 = InterpolationBlock(
            scale_factor=4,
            data_format=data_format,
            name="up_x8")
        self.conv_cls = conv1x1(
            in_channels=128,
            out_channels=classes,
            data_format=data_format,
            name="conv_cls")

    def call(self, x1, x2, x4, training=None):
        """Fuse the three branch outputs (`x1` finest .. `x4` coarsest) and
        return class-score maps ordered from full resolution downwards."""
        outputs = []
        x_cff_24, x_24_cls = self.cff_24(x4, x2, training=training)
        outputs.append(x_24_cls)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1, training=training)
        outputs.append(x_12_cls)
        up_x2 = self.up_x2(x_cff_12)
        up_x2 = self.conv_cls(up_x2)
        outputs.append(up_x2)
        up_x8 = self.up_x8(up_x2)
        outputs.append(up_x8)
        # 1 -> 1/4 -> 1/8 -> 1/16
        outputs.reverse()
        return tuple(outputs)
class ICNet(tf.keras.Model):
    """
    ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
    https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    channels : tuple of int
        Number of output channels for each branch.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbones,
                 backbones_out_channels,
                 channels,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 data_format="channels_last",
                 **kwargs):
        super(ICNet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # Branches downscale by up to 8x, so the input must be 8-divisible.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format
        # Fixed pooling size only when the input size is known up front.
        psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
        psp_head_out_channels = 512
        # Branch 1: full-resolution stem.
        self.branch1 = ICInitBlock(
            in_channels=in_channels,
            out_channels=channels[0],
            data_format=data_format,
            name="branch1")
        # Branch 2: downscale, backbone stages, PSP head; also exposes the
        # intermediate output of the first backbone (do_output=True).
        self.branch2 = MultiOutputSequential(name="branch2")
        self.branch2.add(InterpolationBlock(
            scale_factor=2,
            up=False,
            data_format=data_format,
            name="down1"))
        backbones[0].do_output = True
        self.branch2.add(backbones[0])
        self.branch2.add(InterpolationBlock(
            scale_factor=2,
            up=False,
            data_format=data_format,
            name="down2"))
        self.branch2.add(backbones[1])
        self.branch2.add(PSPBlock(
            in_channels=backbones_out_channels[1],
            upscale_out_size=psp_pool_out_size,
            bottleneck_factor=4,
            data_format=data_format,
            name="psp"))
        self.branch2.add(conv1x1_block(
            in_channels=psp_head_out_channels,
            out_channels=channels[2],
            data_format=data_format,
            name="final_block"))
        self.conv_y2 = conv1x1_block(
            in_channels=backbones_out_channels[0],
            out_channels=channels[1],
            data_format=data_format,
            name="conv_y2")
        self.final_block = ICHeadBlock(
            classes=classes,
            data_format=data_format,
            name="final_block")

    def call(self, x, training=None):
        """Run all branches and the fusion head.

        Returns the tuple of multi-scale score maps when `self.aux` is set,
        otherwise only the full-resolution map.
        """
        y1 = self.branch1(x, training=training)
        # branch2 returns (final output, intermediate output of backbone[0]).
        y3, y2 = self.branch2(x, training=training)
        y2 = self.conv_y2(y2, training=training)
        x = self.final_block(y1, y2, y3, training=training)
        if self.aux:
            return x
        else:
            return x[0]
def get_icnet(backbones,
              backbones_out_channels,
              classes,
              aux=False,
              model_name=None,
              data_format="channels_last",
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create ICNet model with specific parameters.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-branch output widths used by ICNet (stem, conv_y2, PSP head output).
    channels = (64, 256, 256)
    backbones[0].multi_output = False
    backbones[1].multi_output = False
    net = ICNet(
        backbones=backbones,
        backbones_out_channels=backbones_out_channels,
        channels=channels,
        classes=classes,
        aux=aux,
        data_format=data_format,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build once with a dummy input shape so variables exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        # Load by layer name and tolerate mismatches (e.g. a different head).
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)
    return net
def icnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last", **kwargs):
    """
    ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
    High-Resolution Images,' https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # First backbone: keep only the first 3 stages of ResNet(D)-50b features.
    backbone1 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None,
                           data_format=data_format).features
    for i in range(len(backbone1) - 3):
        del backbone1.children[-1]
    # Second backbone: keep the later stages -- drop the final pooling layer
    # and the first 3 stages (which branch 1 already covers).
    backbone2 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None,
                           data_format=data_format).features
    del backbone2.children[-1]
    for i in range(3):
        del backbone2.children[0]
    backbones = (backbone1, backbone2)
    backbones_out_channels = (512, 2048)
    return get_icnet(backbones=backbones, backbones_out_channels=backbones_out_channels, classes=classes,
                     aux=aux, model_name="icnet_resnetd50b_cityscapes", data_format=data_format, **kwargs)
def _test():
    """Smoke-test the ICNet variants: forward a random batch and verify the
    output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (480, 480)
    aux = False
    fixed_size = False
    pretrained = False

    models = [
        (icnet_resnetd50b_cityscapes, 19),
    ]

    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != icnet_resnetd50b_cityscapes or weight_count == 47489184)
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _test()
| 15,700 | 31.985294 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mobilenetb.py | """
MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4']
from .mobilenet import get_mobilenet
def mobilenetb_w1(**kwargs):
    """
    Build the 1.0 MobileNet(B)-224 model (simplified depthwise separable
    convolution block) from 'MobileNets: Efficient Convolutional Neural
    Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenet(
        width_scale=1.0,
        dws_simplified=True,
        model_name="mobilenetb_w1",
        **kwargs)
def mobilenetb_w3d4(**kwargs):
    """
    Build the 0.75 MobileNet(B)-224 model (simplified depthwise separable
    convolution block) from 'MobileNets: Efficient Convolutional Neural
    Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenet(
        width_scale=0.75,
        dws_simplified=True,
        model_name="mobilenetb_w3d4",
        **kwargs)
def mobilenetb_wd2(**kwargs):
    """
    Build the 0.5 MobileNet(B)-224 model (simplified depthwise separable
    convolution block) from 'MobileNets: Efficient Convolutional Neural
    Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenet(
        width_scale=0.5,
        dws_simplified=True,
        model_name="mobilenetb_wd2",
        **kwargs)
def mobilenetb_wd4(**kwargs):
    """
    Build the 0.25 MobileNet(B)-224 model (simplified depthwise separable
    convolution block) from 'MobileNets: Efficient Convolutional Neural
    Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights into the model.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    """
    return get_mobilenet(
        width_scale=0.25,
        dws_simplified=True,
        model_name="mobilenetb_wd4",
        **kwargs)
def _test():
    """Smoke-test every MobileNet(B) variant: forward a random batch and
    verify the output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras.backend as K

    pretrained = False

    # Pairs of (model constructor, expected trainable parameter count).
    models = [
        (mobilenetb_w1, 4222056),
        (mobilenetb_w3d4, 2578120),
        (mobilenetb_wd2, 1326632),
        (mobilenetb_wd4, 467592),
    ]

    for model, expected_count in models:
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _test()
| 3,684 | 34.095238 | 114 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/inceptionresnetv1.py | """
InceptionResNetV1 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, BatchNorm, conv1x1, conv1x1_block, conv3x3_block, Concurrent, flatten,\
is_channels_first, SimpleSequential
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(nn.Layer):
    """
    InceptionResNetV1 type Inception-A unit: three parallel conv branches,
    a 1x1 projection back to `in_channels`, and a scaled residual add.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        # Residual branch is damped by this factor before the skip add.
        self.scale = 0.17
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:3],
            kernel_size_list=(1, 3),
            strides_list=(1, 1),
            padding_list=(0, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[3:6],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        # Concatenated branch widths: the last conv of each branch.
        conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        """Branches -> 1x1 projection -> scaled residual add -> ReLU."""
        identity = x
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionBUnit(nn.Layer):
    """
    InceptionResNetV1 type Inception-B unit: two parallel conv branches
    (one using factorized 1x7/7x1 convs), a 1x1 projection back to
    `in_channels`, and a scaled residual add.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        # Residual branch is damped by this factor before the skip add.
        self.scale = 0.10
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, (1, 7), (7, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 3), (3, 0)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        conv_in_channels = out_channels_list[0] + out_channels_list[3]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        """Branches -> 1x1 projection -> scaled residual add -> ReLU."""
        identity = x
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionCUnit(nn.Layer):
    """
    InceptionResNetV1 type Inception-C unit: two parallel conv branches
    (one using factorized 1x3/3x1 convs), a 1x1 projection back to
    `in_channels`, and a scaled residual add with an optional final ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 scale=0.2,
                 activate=True,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        self.activate = activate
        self.scale = scale
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=out_channels_list[0],
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, (1, 3), (3, 1)),
            strides_list=(1, 1, 1),
            padding_list=(0, (0, 1), (1, 0)),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        conv_in_channels = out_channels_list[0] + out_channels_list[3]
        self.conv = conv1x1(
            in_channels=conv_in_channels,
            out_channels=in_channels,
            use_bias=True,
            data_format=data_format,
            name="conv")
        # The last Inception-C unit in the network skips the final ReLU.
        if self.activate:
            self.activ = nn.ReLU()

    def call(self, x, training=None):
        """Branches -> 1x1 projection -> scaled residual add -> optional ReLU."""
        identity = x
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x
class ReductionAUnit(nn.Layer):
    """
    InceptionResNetV1 type Reduction-A unit: two stride-2 conv branches plus
    a max-pool branch, concatenated to halve the spatial resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[0:1],
            kernel_size_list=(3,),
            strides_list=(2,),
            padding_list=(0,),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[1:4],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch3"))

    def call(self, x, training=None):
        """Apply the parallel downscaling branches and concatenate."""
        x = self.branches(x, training=training)
        return x
class ReductionBUnit(nn.Layer):
    """
    InceptionResNetV1 type Reduction-B unit: three stride-2 conv branches
    plus a max-pool branch, concatenated to halve the spatial resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[0:2],
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[2:4],
            kernel_size_list=(1, 3),
            strides_list=(1, 2),
            padding_list=(0, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=out_channels_list[4:7],
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 2),
            padding_list=(0, 1, 0),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(MaxPoolBranch(
            data_format=data_format,
            name="branch4"))

    def call(self, x, training=None):
        """Apply the parallel downscaling branches and concatenate."""
        x = self.branches(x, training=training)
        return x
class InceptInitBlock(nn.Layer):
    """
    InceptionResNetV1 specific initial block (stem).
    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        # Stem pipeline: 3x3/s2 -> 3x3 -> 3x3 (padded) -> maxpool/s2 -> 1x1 -> 3x3 -> 3x3/s2.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            strides=1,
            padding=1,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool")
        self.conv4 = conv1x1_block(
            in_channels=64,
            out_channels=80,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv4")
        self.conv5 = conv3x3_block(
            in_channels=80,
            out_channels=192,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv5")
        self.conv6 = conv3x3_block(
            in_channels=192,
            out_channels=256,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv6")
    def call(self, x, training=None):
        """Run the stem convolutions in sequence (pooling has no training mode)."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.pool(x)
        x = self.conv4(x, training=training)
        x = self.conv5(x, training=training)
        x = self.conv6(x, training=training)
        return x
class InceptHead(nn.Layer):
    """
    InceptionResNetV1 specific classification block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Fraction of the input units to drop. Must be a number between 0 and 1.
    classes : int
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 bn_eps,
                 dropout_rate,
                 classes,
                 data_format="channels_last",
                 **kwargs):
        super(InceptHead, self).__init__(**kwargs)
        self.data_format = data_format
        # Bug fix: use one consistent predicate for both creating and applying
        # dropout. Previously the layer was created only for `dropout_rate > 0.0`
        # but applied for `dropout_rate != 0.0`, so an (invalid) negative rate
        # would raise AttributeError in `call`. Behavior is unchanged for valid
        # rates in [0, 1].
        self.use_dropout = (dropout_rate > 0.0)
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")
        self.fc1 = nn.Dense(
            units=512,
            input_dim=in_channels,
            use_bias=False,
            name="fc1")
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")
        self.fc2 = nn.Dense(
            units=classes,
            input_dim=512,
            name="fc2")
    def call(self, x, training=None):
        """Flatten, (optionally) drop out, then FC -> BN -> FC to class logits."""
        x = flatten(x, self.data_format)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        x = self.fc1(x)
        x = self.bn(x, training=training)
        x = self.fc2(x)
        return x
class InceptionResNetV1(tf.keras.Model):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.
    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionResNetV1, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Fixed architecture configuration: units per stage and the input
        # channel count of each stage.
        layers = [5, 11, 7]
        in_channels_list = [256, 896, 1792]
        normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
        reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        self.features = SimpleSequential(name="features")
        self.features.add(InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        in_channels = in_channels_list[0]
        for i, layers_per_stage in enumerate(layers):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j in range(layers_per_stage):
                # The first unit of every stage except the first is a
                # reduction (downscaling) unit.
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                    out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                else:
                    unit = normal_units[i]
                    out_channels_list_per_stage = normal_out_channels_list[i]
                # The very last unit is built with unit scale and no activation.
                if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                    unit_kwargs = {"scale": 1.0, "activate": False}
                else:
                    unit_kwargs = {}
                stage.add(unit(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list_per_stage,
                    bn_eps=bn_eps,
                    data_format=data_format,
                    name="unit{}".format(j + 1),
                    **unit_kwargs))
                # After a reduction unit the channel count jumps to the
                # stage's nominal width.
                if (j == 0) and (i != 0):
                    in_channels = in_channels_list[i]
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = InceptHead(
            in_channels=in_channels,
            bn_eps=bn_eps,
            dropout_rate=dropout_rate,
            classes=classes,
            name="output1")
    def call(self, x, training=None):
        """Feature extractor followed by the classification head."""
        x = self.features(x, training=training)
        x = self.output1(x, training=training)
        return x
def get_inceptionresnetv1(model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".tensorflow", "models"),
                          **kwargs):
    """
    Create InceptionResNetV1 model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV1(**kwargs)
    if pretrained:
        # A pretrained checkpoint can only be located via its model name.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build the variables with a dummy batch shape before loading weights.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def inceptionresnetv1(**kwargs):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Remaining keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    `get_inceptionresnetv1`.
    """
    net = get_inceptionresnetv1(model_name="inceptionresnetv1", bn_eps=1e-3, **kwargs)
    return net
def _test():
    """Smoke test: build the model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    models = [
        inceptionresnetv1,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        # Known trainable parameter count for the reference architecture.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionresnetv1 or weight_count == 23995624)
if __name__ == "__main__":
    _test()
| 20,969 | 32.127962 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/scnet.py | """
SCNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
"""
__all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, AvgPool2d, InterpolationBlock, SimpleSequential, get_channel_axis,\
get_im_size, is_channels_first
from .resnet import ResInitBlock
from .senet import SEInitBlock
from .resnesta import ResNeStADownBlock
class ScDownBlock(nn.Layer):
    """
    SCNet specific convolutional downscale block: average pooling followed by
    a 3x3 convolution without activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_size: int or list/tuple of 2 ints, default 2
        Size of the average pooling windows.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_size=2,
                 data_format="channels_last",
                 **kwargs):
        super(ScDownBlock, self).__init__(**kwargs)
        self.pool = AvgPool2d(
            pool_size=pool_size,
            strides=pool_size,
            data_format=data_format,
            name="pool")
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        """Pool then convolve (pooling has no training-dependent behavior)."""
        return self.conv(self.pool(x), training=training)
class ScConv(nn.Layer):
    """
    Self-calibrated convolutional block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    scale_factor : int
        Scale factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(ScConv, self).__init__(**kwargs)
        self.data_format = data_format
        self.down = ScDownBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            pool_size=scale_factor,
            data_format=data_format,
            name="down")
        self.up = InterpolationBlock(
            scale_factor=scale_factor,
            interpolation="nearest",
            data_format=data_format,
            name="up")
        self.sigmoid = tf.nn.sigmoid
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            activation=None,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Self-calibration: gate conv1's output by a sigmoid attention map
        computed from a downscaled-then-upscaled view of the input."""
        in_size = get_im_size(x, data_format=self.data_format)
        # Attention weights: sigmoid(x + upsample(downsample(x))), upsampled
        # back to the input's spatial size.
        w = self.sigmoid(x + self.up(self.down(x, training=training), size=in_size))
        x = self.conv1(x, training=training) * w
        x = self.conv2(x, training=training)
        return x
class ScBottleneck(nn.Layer):
    """
    SCNet specific bottleneck block for residual path in SCNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    scale_factor : int, default 4
        Scale factor.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor=4,
                 scale_factor=4,
                 avg_downsample=False,
                 data_format="channels_last",
                 **kwargs):
        super(ScBottleneck, self).__init__(**kwargs)
        self.data_format = data_format
        # When avg_downsample is set, the convolutions keep stride 1 and the
        # spatial reduction is done by an average pooling afterwards.
        self.avg_resize = (strides > 1) and avg_downsample
        # Each of the two parallel branches gets half the bottleneck width.
        mid_channels = out_channels // bottleneck_factor // 2
        self.conv1a = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1a")
        self.conv2a = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=(1 if self.avg_resize else strides),
            data_format=data_format,
            name="conv2a")
        self.conv1b = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1b")
        self.conv2b = ScConv(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=(1 if self.avg_resize else strides),
            scale_factor=scale_factor,
            data_format=data_format,
            name="conv2b")
        if self.avg_resize:
            self.pool = AvgPool2d(
                pool_size=3,
                strides=strides,
                padding=1,
                data_format=data_format,
                name="pool")
        self.conv3 = conv1x1_block(
            in_channels=(2 * mid_channels),
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        # Branch A: plain 1x1 -> 3x3 path.
        y = self.conv1a(x, training=training)
        y = self.conv2a(y, training=training)
        # Branch B: 1x1 -> self-calibrated convolution path.
        z = self.conv1b(x, training=training)
        z = self.conv2b(z, training=training)
        if self.avg_resize:
            y = self.pool(y)
            z = self.pool(z)
        x = tf.concat([y, z], axis=get_channel_axis(self.data_format))
        # Bug fix: propagate the `training` flag so the batch norm inside
        # `conv3` runs in the correct mode (the original called `self.conv3(x)`
        # while every other conv block in this class receives `training`).
        x = self.conv3(x, training=training)
        return x
class ScUnit(nn.Layer):
    """
    SCNet unit: an `ScBottleneck` body plus a residual shortcut, with an
    optional projection block on the shortcut when shape changes.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 avg_downsample=False,
                 data_format="channels_last",
                 **kwargs):
        super(ScUnit, self).__init__(**kwargs)
        # The shortcut needs a projection whenever shape or stride changes.
        self.resize_identity = (strides != 1) or (in_channels != out_channels)
        self.body = ScBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            avg_downsample=avg_downsample,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            if avg_downsample:
                self.identity_block = ResNeStADownBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    data_format=data_format,
                    name="identity_block")
            else:
                self.identity_block = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    activation=None,
                    data_format=data_format,
                    name="identity_block")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        """Body output plus (possibly projected) shortcut, then ReLU."""
        shortcut = self.identity_block(x, training=training) if self.resize_identity else x
        out = self.body(x, training=training)
        return self.activ(out + shortcut)
class SCNet(tf.keras.Model):
    """
    SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 se_init_block=False,
                 avg_downsample=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SCNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # Stem: SENet-style or plain ResNet-style initial block.
        init_block_class = SEInitBlock if se_init_block else ResInitBlock
        self.features.add(init_block_class(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ScUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    avg_downsample=avg_downsample,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """Feature extractor, then the final dense classifier."""
        x = self.features(x, training=training)
        x = self.output1(x)
        return x
def get_scnet(blocks,
              width_scale=1.0,
              se_init_block=False,
              avg_downsample=False,
              init_block_channels_scale=1,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SCNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    init_block_channels_scale : int, default 1
        Scale factor for number of output channels in the initial unit.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Map the total block count to per-stage unit counts.
    if blocks == 14:
        layers = [1, 1, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2]
    elif blocks == 38:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks))
    # Sanity check: each unit contributes 3 conv layers, plus 2 for stem/head.
    assert (sum(layers) * 3 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    init_block_channels *= init_block_channels_scale
    bottleneck_factor = 4
    channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale all unit widths except the very last unit of the last stage.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = SCNet(
        channels=channels,
        init_block_channels=init_block_channels,
        se_init_block=se_init_block,
        avg_downsample=avg_downsample,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build variables with a dummy batch shape before loading weights.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def scnet50(**kwargs):
    """
    SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_scnet`.
    """
    net = get_scnet(blocks=50, model_name="scnet50", **kwargs)
    return net
def scnet101(**kwargs):
    """
    SCNet-101 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_scnet`.
    """
    net = get_scnet(blocks=101, model_name="scnet101", **kwargs)
    return net
def scneta50(**kwargs):
    """
    SCNet(A)-50 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_scnet`.
    """
    net = get_scnet(blocks=50, se_init_block=True, avg_downsample=True, model_name="scneta50", **kwargs)
    return net
def scneta101(**kwargs):
    """
    SCNet(A)-101 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_scnet`.
    """
    net = get_scnet(blocks=101, se_init_block=True, avg_downsample=True, init_block_channels_scale=2,
                    model_name="scneta101", **kwargs)
    return net
def _test():
    """Smoke test: build each model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    models = [
        scnet50,
        scnet101,
        scneta50,
        scneta101,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        # Known trainable parameter counts for the reference architectures.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != scnet50 or weight_count == 25564584)
        assert (model != scnet101 or weight_count == 44565416)
        assert (model != scneta50 or weight_count == 25583816)
        assert (model != scneta101 or weight_count == 44689192)
if __name__ == "__main__":
    _test()
| 17,161 | 31.751908 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/igcv3.py | """
IGCV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ReLU6, SimpleSequential, flatten
class InvResUnit(nn.Layer):
    """
    So-called 'Inverted Residual Unit' layer.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    expansion : bool
        Whether do expansion of channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 expansion,
                 data_format="channels_last",
                 **kwargs):
        super(InvResUnit, self).__init__(**kwargs)
        # A residual connection is only possible when shape is preserved.
        self.residual = (in_channels == out_channels) and (strides == 1)
        mid_channels = in_channels * 6 if expansion else in_channels
        groups = 2
        # Grouped 1x1 expand -> channel shuffle -> depthwise 3x3 -> grouped 1x1 project.
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=groups,
            activation=None,
            data_format=data_format,
            name="conv1")
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="c_shuffle")
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=ReLU6(),
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        """Run the inverted-residual pipeline, adding the input back when possible."""
        if self.residual:
            identity = x
        x = self.conv1(x, training=training)
        x = self.c_shuffle(x)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        if self.residual:
            x = x + identity
        return x
class IGCV3(tf.keras.Model):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IGCV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            activation=ReLU6(),
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # The very first unit of the network does not expand channels.
                expansion = (i != 0) or (j != 0)
                stage.add(InvResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    expansion=expansion,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            activation=ReLU6(),
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """Feature extractor, flatten, then the final dense classifier."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_igcv3(width_scale,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create IGCV3-D model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    from functools import reduce
    # Group layer blocks into stages: a downsample flag starts a new stage,
    # otherwise the block's units are appended to the previous stage.
    channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
                      zip(channels_per_layers, layers, downsample), [[]])
    if width_scale != 1.0:
        def make_even(x):
            # Channel counts must stay even (units use 2 conv groups).
            return x if (x % 2 == 0) else x + 1
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        # The final block is only widened, never narrowed.
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))
    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build variables with a dummy batch shape before loading weights.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def igcv3_w1(**kwargs):
    """
    IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.
    """
    net = get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs)
    return net
def igcv3_w3d4(**kwargs):
    """
    IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.
    """
    net = get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs)
    return net
def igcv3_wd2(**kwargs):
    """
    IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.
    """
    net = get_igcv3(width_scale=0.5, model_name="igcv3_wd2", **kwargs)
    return net
def igcv3_wd4(**kwargs):
    """
    IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.
    """
    net = get_igcv3(width_scale=0.25, model_name="igcv3_wd4", **kwargs)
    return net
def _test():
    """Smoke test: build each model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    pretrained = False
    models = [
        igcv3_w1,
        igcv3_w3d4,
        igcv3_wd2,
        igcv3_wd4,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        # Known trainable parameter counts for the reference architectures.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != igcv3_w1 or weight_count == 3491688)
        assert (model != igcv3_w3d4 or weight_count == 2638084)
        assert (model != igcv3_wd2 or weight_count == 1985528)
        assert (model != igcv3_wd4 or weight_count == 1534020)
if __name__ == "__main__":
    _test()
| 10,739 | 32.667712 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/seresnet_cifar.py | """
SE-ResNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn',
'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn',
'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn',
'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn',
'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn',
'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn',
'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn',
'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first
from .seresnet import SEResUnit
class CIFARSEResNet(tf.keras.Model):
    """
    SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARSEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # CIFAR stem: a single stride-1 3x3 conv (no aggressive downsampling).
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """Feature extractor, flatten, then the final dense classifier."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_seresnet_cifar(classes,
                       blocks,
                       bottleneck,
                       model_name=None,
                       pretrained=False,
                       root=os.path.join("~", ".tensorflow", "models"),
                       **kwargs):
    """
    Create SE-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Derive the (uniform) number of units per stage from the total depth.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        units_per_stage = (blocks - 2) // 9
    else:
        assert ((blocks - 2) % 6 == 0)
        units_per_stage = (blocks - 2) // 6

    init_block_channels = 16
    stage_widths = [16, 32, 64]
    # Bottleneck units expand the per-unit width by a factor of 4.
    width_factor = 4 if bottleneck else 1
    channels = [[width * width_factor] * units_per_stage for width in stage_widths]

    net = CIFARSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def seresnet20_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-20 model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False,
                              model_name="seresnet20_cifar10", **kwargs)


def seresnet20_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-20 model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False,
                              model_name="seresnet20_cifar100", **kwargs)


def seresnet20_svhn(classes=10, **kwargs):
    """
    SE-ResNet-20 model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False,
                              model_name="seresnet20_svhn", **kwargs)


def seresnet56_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-56 model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False,
                              model_name="seresnet56_cifar10", **kwargs)


def seresnet56_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-56 model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False,
                              model_name="seresnet56_cifar100", **kwargs)


def seresnet56_svhn(classes=10, **kwargs):
    """
    SE-ResNet-56 model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False,
                              model_name="seresnet56_svhn", **kwargs)
def seresnet110_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-110 model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False,
                              model_name="seresnet110_cifar10", **kwargs)


def seresnet110_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-110 model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False,
                              model_name="seresnet110_cifar100", **kwargs)


def seresnet110_svhn(classes=10, **kwargs):
    """
    SE-ResNet-110 model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False,
                              model_name="seresnet110_svhn", **kwargs)


def seresnet164bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True,
                              model_name="seresnet164bn_cifar10", **kwargs)


def seresnet164bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-164(BN) model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True,
                              model_name="seresnet164bn_cifar100", **kwargs)


def seresnet164bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True,
                              model_name="seresnet164bn_svhn", **kwargs)
def seresnet272bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True,
                              model_name="seresnet272bn_cifar10", **kwargs)


def seresnet272bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-272(BN) model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True,
                              model_name="seresnet272bn_cifar100", **kwargs)


def seresnet272bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True,
                              model_name="seresnet272bn_svhn", **kwargs)


def seresnet542bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True,
                              model_name="seresnet542bn_cifar10", **kwargs)


def seresnet542bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-542(BN) model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True,
                              model_name="seresnet542bn_cifar100", **kwargs)


def seresnet542bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True,
                              model_name="seresnet542bn_svhn", **kwargs)
def seresnet1001_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-1001 model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True,
                              model_name="seresnet1001_cifar10", **kwargs)


def seresnet1001_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-1001 model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True,
                              model_name="seresnet1001_cifar100", **kwargs)


def seresnet1001_svhn(classes=10, **kwargs):
    """
    SE-ResNet-1001 model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True,
                              model_name="seresnet1001_svhn", **kwargs)


def seresnet1202_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-1202 model for the CIFAR-10 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False,
                              model_name="seresnet1202_cifar10", **kwargs)


def seresnet1202_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-1202 model for the CIFAR-100 dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False,
                              model_name="seresnet1202_cifar100", **kwargs)


def seresnet1202_svhn(classes=10, **kwargs):
    """
    SE-ResNet-1202 model for the SVHN dataset from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False,
                              model_name="seresnet1202_svhn", **kwargs)
def _test():
    """Smoke-test every model constructor: output shape and trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # (constructor, number of classes) pairs covering every model in this module.
    models = [
        (seresnet20_cifar10, 10),
        (seresnet20_cifar100, 100),
        (seresnet20_svhn, 10),
        (seresnet56_cifar10, 10),
        (seresnet56_cifar100, 100),
        (seresnet56_svhn, 10),
        (seresnet110_cifar10, 10),
        (seresnet110_cifar100, 100),
        (seresnet110_svhn, 10),
        (seresnet164bn_cifar10, 10),
        (seresnet164bn_cifar100, 100),
        (seresnet164bn_svhn, 10),
        (seresnet272bn_cifar10, 10),
        (seresnet272bn_cifar100, 100),
        (seresnet272bn_svhn, 10),
        (seresnet542bn_cifar10, 10),
        (seresnet542bn_cifar100, 100),
        (seresnet542bn_svhn, 10),
        (seresnet1001_cifar10, 10),
        (seresnet1001_cifar100, 100),
        (seresnet1001_svhn, 10),
        (seresnet1202_cifar10, 10),
        (seresnet1202_cifar100, 100),
        (seresnet1202_svhn, 10),
    ]

    # Expected trainable parameter counts per constructor.
    expected_weight_counts = {
        seresnet20_cifar10: 274847,
        seresnet20_cifar100: 280697,
        seresnet20_svhn: 274847,
        seresnet56_cifar10: 862889,
        seresnet56_cifar100: 868739,
        seresnet56_svhn: 862889,
        seresnet110_cifar10: 1744952,
        seresnet110_cifar100: 1750802,
        seresnet110_svhn: 1744952,
        seresnet164bn_cifar10: 1906258,
        seresnet164bn_cifar100: 1929388,
        seresnet164bn_svhn: 1906258,
        seresnet272bn_cifar10: 3153826,
        seresnet272bn_cifar100: 3176956,
        seresnet272bn_svhn: 3153826,
        seresnet542bn_cifar10: 6272746,
        seresnet542bn_cifar100: 6295876,
        seresnet542bn_svhn: 6272746,
        seresnet1001_cifar10: 11574910,
        seresnet1001_cifar100: 11598040,
        seresnet1001_svhn: 11574910,
        seresnet1202_cifar10: 19582226,
        seresnet1202_cifar100: 19588076,
        seresnet1202_svhn: 19582226,
    }

    for model, classes in models:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, classes))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])


if __name__ == "__main__":
    _test()
| 23,745 | 36.692063 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnetd.py | """
ResNet(D) with dilation for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MultiOutputSequential, SimpleSequential, is_channels_first
from .resnet import ResUnit, ResInitBlock
from .senet import SEInitBlock
class ResNetD(tf.keras.Model):
    """
    ResNet(D) with dilation model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Spatial downsampling by stride is applied only up to stage 2; the last two stages keep
    stride 1 and use growing dilation instead. Optionally, intermediate stage outputs
    ("bends") can be returned alongside the final prediction.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    ordinary_init : bool, default False
        Whether to use original initial block or SENet one.
    bends : tuple of int, default None
        Numbers of bends for multiple output.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 ordinary_init=False,
                 bends=None,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNetD, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Multi-output mode is enabled simply by passing a non-None `bends` tuple.
        self.multi_output = (bends is not None)
        self.data_format = data_format
        # MultiOutputSequential presumably also collects outputs of stages flagged with
        # `do_output` (see below) in addition to the final output — confirm in common.py.
        self.features = MultiOutputSequential(name="features")
        if ordinary_init:
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                data_format=data_format,
                name="init_block"))
        else:
            # SENet-style stem uses twice the stem width of the ordinary init block.
            init_block_channels = 2 * init_block_channels
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                data_format=data_format,
                name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Strided downsampling happens only in the first unit of stage 2
                # (i == 1); stages 3 and 4 keep stride 1.
                strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                # Stages 3 and 4 dilate instead of striding: the first unit of a dilated
                # stage uses half the dilation of its remaining units
                # (stage 3: 1 then 2; stage 4: 2 then 4).
                dilation = (2 ** max(0, i - 1 - int(j == 0)))
                stage.add(ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            if self.multi_output and ((i + 1) in bends):
                # Flag this stage so its output is also returned (1-based stage index).
                stage.do_output = True
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        # `outs` looks like [final_features, *bend_outputs] — verify against
        # MultiOutputSequential's implementation.
        outs = self.features(x, training=training)
        x = outs[0]
        x = self.output1(x)
        if self.multi_output:
            return [x] + outs[1:]
        else:
            return x
def get_resnetd(blocks,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create ResNet(D) with dilation model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Units per stage for every supported depth.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        14: [2, 2, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]

    init_block_channels = 64
    # Depths below 50 use basic residual units; 50 and above use bottleneck units.
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last one, which keeps its size.
        last_i = len(channels) - 1
        scaled_channels = []
        for i, ci in enumerate(channels):
            last_j = len(ci) - 1
            scaled_channels.append(
                [cij if ((i == last_i) and (j == last_j)) else int(cij * width_scale)
                 for j, cij in enumerate(ci)])
        channels = scaled_channels
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNetD(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def resnetd50b(**kwargs):
    """
    ResNet(D)-50 with dilation model, with the stride moved to the second convolution of the
    bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=50, conv1_stride=False,
                       model_name="resnetd50b", **kwargs)


def resnetd101b(**kwargs):
    """
    ResNet(D)-101 with dilation model, with the stride moved to the second convolution of the
    bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=101, conv1_stride=False,
                       model_name="resnetd101b", **kwargs)


def resnetd152b(**kwargs):
    """
    ResNet(D)-152 with dilation model, with the stride moved to the second convolution of the
    bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=152, conv1_stride=False,
                       model_name="resnetd152b", **kwargs)
def _test():
    """Smoke-test the ResNet(D) constructors: output shape and trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    ordinary_init = False
    bends = None
    pretrained = False

    # Expected trainable parameter counts: (ordinary init block, SENet init block).
    expected_weight_counts = {
        resnetd50b: (25557032, 25680808),
        resnetd101b: (44549160, 44672936),
        resnetd152b: (60192808, 60316584),
    }

    for model in (resnetd50b, resnetd101b, resnetd152b):
        net = model(
            pretrained=pretrained,
            ordinary_init=ordinary_init,
            bends=bends,
            data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        if bends is not None:
            y = y[0]
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        ordinary_count, senet_count = expected_weight_counts[model]
        assert (weight_count == (ordinary_count if ordinary_init else senet_count))


if __name__ == "__main__":
    _test()
| 10,194 | 34.034364 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/quartznet.py | """
QuartzNet for ASR, implemented in TensorFlow.
Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
https://arxiv.org/abs/1910.10261.
"""
__all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de',
'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru',
'quartznet15x5_ru34']
from .jasper import get_jasper
from .common import is_channels_first
def quartznet5x5_en_ls(classes=29, **kwargs):
    """
    QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep
    Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
    https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Graphemes: space, the 26 lowercase Latin letters and the apostrophe.
    vocabulary = [" "] + list("abcdefghijklmnopqrstuvwxyz") + ["'"]
    return get_jasper(classes=classes, version=("quartznet", "5x5"), use_dw=True,
                      vocabulary=vocabulary, model_name="quartznet5x5_en_ls", **kwargs)


def quartznet15x5_en(classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition
    with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Graphemes: space, the 26 lowercase Latin letters and the apostrophe.
    vocabulary = [" "] + list("abcdefghijklmnopqrstuvwxyz") + ["'"]
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, model_name="quartznet15x5_en", **kwargs)


def quartznet15x5_en_nr(classes=29, **kwargs):
    """
    QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep
    Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
    https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Graphemes: space, the 26 lowercase Latin letters and the apostrophe.
    vocabulary = [" "] + list("abcdefghijklmnopqrstuvwxyz") + ["'"]
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True,
                      vocabulary=vocabulary, model_name="quartznet15x5_en_nr", **kwargs)
def quartznet15x5_fr(classes=43, **kwargs):
"""
QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 43
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï',
'ü', 'ÿ']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_fr", **kwargs)
def quartznet15x5_de(classes=32, **kwargs):
"""
QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 32
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_de", **kwargs)
def quartznet15x5_it(classes=39, **kwargs):
"""
QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 39
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_it", **kwargs)
def quartznet15x5_es(classes=36, **kwargs):
    """
    Build the Spanish-language QuartzNet 15x5 ASR model from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 36
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Blank/space token followed by the Spanish grapheme inventory (incl. apostrophe and accents).
    vocabulary = [' '] + list("abcdefghijklmnopqrstuvwxyz'áéíóúñü")
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_es", **kwargs)
def quartznet15x5_ca(classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Catalan language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Catalan grapheme inventory (incl. apostrophe, accents and the 'ŀ' of 'ŀl').
    vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                  't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ca", **kwargs)
def quartznet15x5_pl(classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Polish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Polish grapheme inventory (no 'q', 'v', 'x'; includes the ogonek/acute/dot letters).
    vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
                  'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_pl", **kwargs)
def quartznet15x5_ru(classes=35, **kwargs):
    """
    Build the Russian-language QuartzNet 15x5 ASR model from 'QuartzNet: Deep Automatic Speech
    Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 35
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Blank/space token followed by the full Cyrillic alphabet (incl. 'ё').
    vocabulary = [' '] + list("абвгдеёжзийклмнопрстуфхцчшщъыьэюя")
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ru", **kwargs)
def quartznet15x5_ru34(classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Russian language (34 classes, reduced grapheme set without 'ё') from 'QuartzNet: Deep
    Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.

    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Same Cyrillic inventory as quartznet15x5_ru but with 'ё' dropped.
    vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т',
                  'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ru34", **kwargs)
def _test():
    """Smoke-test every QuartzNet constructor: build, run random input, check shapes and weight counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    import tensorflow as tf

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    from_audio = True
    audio_features = 64

    models = [
        quartznet5x5_en_ls,
        quartznet15x5_en,
        quartznet15x5_en_nr,
        quartznet15x5_fr,
        quartznet15x5_de,
        quartznet15x5_it,
        quartznet15x5_es,
        quartznet15x5_ca,
        quartznet15x5_pl,
        quartznet15x5_ru,
        quartznet15x5_ru34,
    ]

    for model in models:
        net = model(
            in_channels=audio_features,
            from_audio=from_audio,
            pretrained=pretrained,
            data_format=data_format)

        batch = 3
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (
            (batch, audio_features, seq_len_max) if is_channels_first(data_format) else
            (batch, seq_len_max, audio_features))
        x = tf.random.normal(shape=x_shape)
        # `np.long` was removed in NumPy 1.24 (and was platform-dependent before that);
        # use an explicit 64-bit integer dtype for the sequence lengths.
        x_len = tf.convert_to_tensor(seq_len.astype(np.int64))

        y, y_len = net(x, x_len)
        assert (y.shape.as_list()[0] == batch)
        classes_id = 1 if is_channels_first(data_format) else 2
        seq_id = 2 if is_channels_first(data_format) else 1
        assert (y.shape.as_list()[classes_id] == net.classes)
        if from_audio:
            assert (y.shape.as_list()[seq_id] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.shape.as_list()[seq_id] in [seq_len_max // 2, seq_len_max // 2 + 1])

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != quartznet5x5_en_ls or weight_count == 6713181)
        assert (model != quartznet15x5_en or weight_count == 18924381)
        assert (model != quartznet15x5_en_nr or weight_count == 18924381)
        assert (model != quartznet15x5_fr or weight_count == 18938731)
        assert (model != quartznet15x5_de or weight_count == 18927456)
        assert (model != quartznet15x5_it or weight_count == 18934631)
        assert (model != quartznet15x5_es or weight_count == 18931556)
        assert (model != quartznet15x5_ca or weight_count == 18934631)
        assert (model != quartznet15x5_pl or weight_count == 18929506)
        assert (model != quartznet15x5_ru or weight_count == 18930531)
        assert (model != quartznet15x5_ru34 or weight_count == 18929506)


if __name__ == "__main__":
    _test()
| 13,642 | 43.439739 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/preresnet.py | """
PreResNet for ImageNet-1K, implemented in TensorFlow.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck',
'PreResUnit', 'PreResInitBlock', 'PreResActivation']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, pre_conv1x1_block, pre_conv3x3_block, conv1x1, MaxPool2d, BatchNorm, SimpleSequential,\
flatten
class PreResBlock(nn.Layer):
    """
    Simple PreResNet block: two pre-activated 3x3 convolutions forming the residual branch of a
    PreResNet unit. Returns both the block output and the pre-activation of its input, so the
    enclosing unit can feed the pre-activation to a projection shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 data_format="channels_last",
                 **kwargs):
        super(PreResBlock, self).__init__(**kwargs)
        # Both convolutions share the bias/BN/layout configuration.
        shared = dict(use_bias=use_bias, use_bn=use_bn, data_format=data_format)
        self.conv1 = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            return_preact=True,  # expose the pre-activation for the identity shortcut
            name="conv1",
            **shared)
        self.conv2 = pre_conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            name="conv2",
            **shared)

    def call(self, x, training=None):
        out, pre_activ = self.conv1(x, training=training)
        out = self.conv2(out, training=training)
        return out, pre_activ
class PreResBottleneck(nn.Layer):
    """
    PreResNet bottleneck block (1x1 -> 3x3 -> 1x1 pre-activated convolutions) for the residual
    branch of a PreResNet unit. The middle width is out_channels // 4. Returns both the block
    output and the pre-activation of its input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 conv1_stride,
                 data_format="channels_last",
                 **kwargs):
        super(PreResBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # The spatial stride goes either to the first 1x1 conv or to the middle 3x3 conv.
        stride1, stride2 = (strides, 1) if conv1_stride else (1, strides)
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=stride1,
            return_preact=True,  # expose the pre-activation for the identity shortcut
            data_format=data_format,
            name="conv1")
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride2,
            data_format=data_format,
            name="conv2")
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        out, pre_activ = self.conv1(x, training=training)
        out = self.conv2(out, training=training)
        out = self.conv3(out, training=training)
        return out, pre_activ
class PreResUnit(nn.Layer):
    """
    PreResNet unit: a pre-activated residual branch (simple or bottleneck block) plus an identity
    shortcut, with a 1x1 projection on the shortcut whenever the shape changes.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 bottleneck=True,
                 conv1_stride=False,
                 data_format="channels_last",
                 **kwargs):
        super(PreResUnit, self).__init__(**kwargs)
        # A projection is needed whenever the tensor shape changes across the unit.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        if bottleneck:
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = PreResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                data_format=data_format,
                name="identity_conv")

    def call(self, x, training=None):
        residual, pre_activ = self.body(x, training=training)
        # The projection shortcut consumes the pre-activated input, as in the paper.
        shortcut = self.identity_conv(pre_activ, training=training) if self.resize_identity else x
        return residual + shortcut
class PreResInitBlock(nn.Layer):
    """
    PreResNet stem: 7x7/2 convolution, BatchNorm, ReLU and 3x3/2 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PreResInitBlock, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(data_format=data_format, name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            name="pool")

    def call(self, x, training=None):
        # conv -> BN -> ReLU -> max-pool, halving the spatial size twice overall.
        out = self.bn(self.conv(x), training=training)
        return self.pool(self.activ(out))
class PreResActivation(nn.Layer):
    """
    Final pre-activation stage of PreResNet: BatchNorm followed by ReLU, with no convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PreResActivation, self).__init__(**kwargs)
        assert (in_channels is not None)
        self.bn = BatchNorm(data_format=data_format, name="bn")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        return self.activ(self.bn(x, training=training))
class PreResNet(tf.keras.Model):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Built as stem -> staged PreRes units -> final BN/ReLU -> 7x7 average pooling -> dense classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels, start=1):
            stage = SimpleSequential(name="stage{}".format(stage_idx))
            for unit_idx, out_channels in enumerate(stage_channels, start=1):
                # Downsample at the first unit of every stage except the first stage.
                strides = 2 if (unit_idx == 1) and (stage_idx != 1) else 1
                stage.add(PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(unit_idx)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        feats = self.features(x, training=training)
        return self.output1(flatten(feats, self.data_format))
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units; when None it is inferred from depth.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths whose per-stage unit counts do not depend on the block type.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths whose unit counts depend on whether bottleneck blocks are used.
    dual_depth_to_layers = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    if blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    elif (blocks, bottleneck) in dual_depth_to_layers:
        layers = dual_depth_to_layers[(blocks, bottleneck)]
    else:
        raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))

    # Sanity check: the unit counts must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last stage.
        last_i = len(channels) - 1
        channels = [[cij if (i == last_i) and (j == len(ci) - 1) else int(cij * width_scale)
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def preresnet10(**kwargs):
    """
    Construct the (experimental) PreResNet-10 classifier from 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
    return net
def preresnet12(**kwargs):
    """
    Construct the (experimental) PreResNet-12 classifier from 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
    return net
def preresnet14(**kwargs):
    """
    Construct the (experimental) PreResNet-14 classifier from 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
    return net
def preresnetbc14b(**kwargs):
    """
    Construct the (experimental) bottleneck-compressed PreResNet-BC-14b classifier, with the stride
    in the second bottleneck convolution, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
    return net
def preresnet16(**kwargs):
    """
    Construct the (experimental) PreResNet-16 classifier from 'Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
    return net
def preresnet18_wd4(**kwargs):
    """
    Construct the (experimental) PreResNet-18 classifier at 0.25 width scale, from 'Identity
    Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs)
    return net
def preresnet18_wd2(**kwargs):
    """
    Construct the (experimental) PreResNet-18 classifier at 0.5 width scale, from 'Identity
    Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs)
    return net
def preresnet18_w3d4(**kwargs):
    """
    Construct the (experimental) PreResNet-18 classifier at 0.75 width scale, from 'Identity
    Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs)
    return net
def preresnet18(**kwargs):
    """
    Construct the PreResNet-18 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
    return net
def preresnet26(**kwargs):
    """
    Construct the (experimental) PreResNet-26 classifier with simple (non-bottleneck) blocks, from
    'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
    return net
def preresnetbc26b(**kwargs):
    """
    Construct the (experimental) bottleneck-compressed PreResNet-BC-26b classifier, with the stride
    in the second bottleneck convolution, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
    return net
def preresnet34(**kwargs):
    """
    Construct the PreResNet-34 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=34, model_name="preresnet34", **kwargs)
    return net
def preresnetbc38b(**kwargs):
    """
    Construct the (experimental) bottleneck-compressed PreResNet-BC-38b classifier, with the stride
    in the second bottleneck convolution, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
    return net
def preresnet50(**kwargs):
    """
    Construct the PreResNet-50 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=50, model_name="preresnet50", **kwargs)
    return net
def preresnet50b(**kwargs):
    """
    Construct the PreResNet-50 classifier with the stride in the second bottleneck convolution,
    from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs)
    return net
def preresnet101(**kwargs):
    """
    Construct the PreResNet-101 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=101, model_name="preresnet101", **kwargs)
    return net
def preresnet101b(**kwargs):
    """
    Construct the PreResNet-101 classifier with the stride in the second bottleneck convolution,
    from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs)
    return net
def preresnet152(**kwargs):
    """
    Construct the PreResNet-152 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
    return net
def preresnet152b(**kwargs):
    """
    Construct the PreResNet-152 classifier with the stride in the second bottleneck convolution,
    from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
    return net
def preresnet200(**kwargs):
    """
    Construct the PreResNet-200 classifier from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
    return net
def preresnet200b(**kwargs):
    """
    Construct the PreResNet-200 classifier with the stride in the second bottleneck convolution,
    from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
    return net
def preresnet269b(**kwargs):
    """
    Construct the PreResNet-269 classifier with the stride in the second bottleneck convolution,
    from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs)
    return net
def _test():
    """Smoke-test every PreResNet constructor and verify its trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Expected number of trainable weights per constructor (insertion order drives the test order).
    expected_weight_counts = {
        preresnet10: 5417128,
        preresnet12: 5491112,
        preresnet14: 5786536,
        preresnetbc14b: 10057384,
        preresnet16: 6967208,
        preresnet18_wd4: 3935960,
        preresnet18_wd2: 5802440,
        preresnet18_w3d4: 8473784,
        preresnet18: 11687848,
        preresnet26: 17958568,
        preresnetbc26b: 15987624,
        preresnet34: 21796008,
        preresnetbc38b: 21917864,
        preresnet50: 25549480,
        preresnet50b: 25549480,
        preresnet101: 44541608,
        preresnet101b: 44541608,
        preresnet152: 60185256,
        preresnet152b: 60185256,
        preresnet200: 64666280,
        preresnet200b: 64666280,
        preresnet269b: 102065832,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 28,922 | 33.107311 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/lednet.py | """
LEDNet for image segmentation, implemented in TensorFlow.
Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1905.02423.
"""
__all__ = ['LEDNet', 'lednet_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3, conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, ConvBlock, NormActivation,\
ChannelShuffle, InterpolationBlock, Hourglass, BreakBlock, SimpleSequential, MaxPool2d, is_channels_first,\
get_channel_axis, get_im_size
class AsymConvBlock(nn.Layer):
    """
    Asymmetric separable convolution block: a (k x 1) "leftwise" convolution followed by a
    (1 x k) "rightwise" convolution, each with its own BN/activation settings.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    lw_use_bn : bool, default True
        Whether to use BatchNorm layer (leftwise convolution block).
    rw_use_bn : bool, default True
        Whether to use BatchNorm layer (rightwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    lw_activation : function or str or None, default 'relu'
        Activation function after the leftwise convolution block.
    rw_activation : function or str or None, default 'relu'
        Activation function after the rightwise convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 lw_use_bn=True,
                 rw_use_bn=True,
                 bn_eps=1e-5,
                 lw_activation="relu",
                 rw_activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(AsymConvBlock, self).__init__(**kwargs)
        # Settings shared by both 1D halves of the separable convolution.
        common = dict(
            in_channels=channels,
            out_channels=channels,
            strides=1,
            groups=groups,
            use_bias=use_bias,
            bn_eps=bn_eps,
            data_format=data_format)
        self.lw_conv = ConvBlock(
            kernel_size=(kernel_size, 1),
            padding=(padding, 0),
            dilation=(dilation, 1),
            use_bn=lw_use_bn,
            activation=lw_activation,
            name="lw_conv",
            **common)
        self.rw_conv = ConvBlock(
            kernel_size=(1, kernel_size),
            padding=(0, padding),
            dilation=(1, dilation),
            use_bn=rw_use_bn,
            activation=rw_activation,
            name="rw_conv",
            **common)

    def call(self, x, training=None):
        out = self.lw_conv(x, training=training)
        out = self.rw_conv(out, training=training)
        return out
def asym_conv3x3_block(padding=1,
                       **kwargs):
    """
    Build a 3x3 asymmetric separable convolution block, i.e. a (3 x 1)
    convolution followed by a (1 x 3) convolution.
    Parameters:
    ----------
    padding : int, default 1
        Padding value for convolution layer.
    **kwargs
        All remaining options (channels, dilation, groups, bias/BN/activation
        settings, data_format, name) are forwarded to `AsymConvBlock`
        unchanged.
    """
    return AsymConvBlock(kernel_size=3, padding=padding, **kwargs)
class LEDDownBlock(nn.Layer):
    """
    LEDNet specific downscale block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    correct_size_mismatch : bool
        Whether to correct downscaled sizes of images.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 correct_size_mismatch,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(LEDDownBlock, self).__init__(**kwargs)
        self.correct_size_mismatch = correct_size_mismatch
        self.data_format = data_format
        self.axis = get_channel_axis(data_format)
        # Pooling branch keeps the original channels; the conv branch supplies
        # the remaining (out_channels - in_channels) channels so that the
        # concatenation in `call` has exactly `out_channels`.
        self.pool = MaxPool2d(
            pool_size=2,
            strides=2,
            data_format=data_format,
            name="pool")
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=(out_channels - in_channels),
            strides=2,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.norm_activ = NormActivation(
            in_channels=out_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="norm_activ")
    def call(self, x, training=None):
        y1 = self.pool(x)
        y2 = self.conv(x)
        if self.correct_size_mismatch:
            # For odd input sizes the strided conv and the max-pool can yield
            # spatial sizes differing by one pixel; pad the pooled branch to
            # match the conv branch.
            # Bug fix: tf.Tensor has no `.size()` method (a PyTorch idiom that
            # raised AttributeError here); use the static `.shape` instead.
            if self.data_format == "channels_last":
                diff_h = y2.shape[1] - y1.shape[1]
                diff_w = y2.shape[2] - y1.shape[2]
            else:
                diff_h = y2.shape[2] - y1.shape[2]
                diff_w = y2.shape[3] - y1.shape[3]
            # Bug fix: Keras ZeroPadding2D expects ((top, bottom), (left, right)),
            # i.e. the height pair first; the width pair was passed first.
            y1 = nn.ZeroPadding2D(
                padding=((diff_h // 2, diff_h - diff_h // 2), (diff_w // 2, diff_w - diff_w // 2)),
                data_format=self.data_format)(y1)
        x = tf.concat([y2, y1], axis=self.axis)
        x = self.norm_activ(x, training=training)
        return x
class LEDBranch(nn.Layer):
    """
    LEDNet encoder branch: two asymmetric 3x3 convolutions (the second one
    dilated) with optional dropout.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(LEDBranch, self).__init__(**kwargs)
        # Dropout layer is created only for a non-zero rate.
        self.use_dropout = (dropout_rate != 0.0)
        self.conv1 = asym_conv3x3_block(
            channels=channels,
            use_bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        # The second asymmetric conv is dilated, and its final activation is
        # disabled (rw_activation=None): LEDUnit applies ReLU only after the
        # residual addition.
        self.conv2 = asym_conv3x3_block(
            channels=channels,
            padding=dilation,
            dilation=dilation,
            use_bias=True,
            lw_use_bn=False,
            bn_eps=bn_eps,
            rw_activation=None,
            data_format=data_format,
            name="conv2")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")
    def call(self, x, training=None):
        """
        Apply both asymmetric convolutions, then (optionally) dropout.
        """
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        return x
class LEDUnit(nn.Layer):
    """
    LEDNet encoder unit (Split-Shuffle-non-bottleneck).
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(LEDUnit, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # Each of the two parallel branches processes half of the channels.
        mid_channels = channels // 2
        self.left_branch = LEDBranch(
            channels=mid_channels,
            dilation=dilation,
            dropout_rate=dropout_rate,
            bn_eps=bn_eps,
            data_format=data_format,
            name="left_branch")
        self.right_branch = LEDBranch(
            channels=mid_channels,
            dilation=dilation,
            dropout_rate=dropout_rate,
            bn_eps=bn_eps,
            data_format=data_format,
            name="right_branch")
        self.activ = nn.ReLU()
        self.shuffle = ChannelShuffle(
            channels=channels,
            groups=2,
            data_format=data_format,
            name="shuffle")
    def call(self, x, training=None):
        """
        Split -> two branches -> concat -> residual add -> ReLU -> shuffle.
        """
        identity = x
        # Split channels into two halves processed by the parallel branches.
        x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis)
        x1 = self.left_branch(x1, training=training)
        x2 = self.right_branch(x2, training=training)
        x = tf.concat([x1, x2], axis=self.axis)
        # Residual connection, then activation; the channel shuffle mixes
        # information between the two branch halves.
        x = x + identity
        x = self.activ(x)
        x = self.shuffle(x)
        return x
class PoolingBranch(nn.Layer):
    """
    Pooling branch: global average pooling -> 1x1 conv -> upsample back to the
    input spatial size (a global-context branch).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    bn_eps : float
        Small float added to variance in Batch norm.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias,
                 bn_eps,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(PoolingBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv")
        # out_size=None: the actual target size is passed at call time, which
        # supports variable input sizes when in_size is None.
        self.up = InterpolationBlock(
            scale_factor=None,
            out_size=in_size,
            data_format=data_format,
            name="up")
    def call(self, x, training=None):
        """
        Pool globally, project with 1x1 conv, and upsample to the input size.
        """
        # When the static in_size is unknown, derive it from the input tensor.
        in_size = self.in_size if self.in_size is not None else get_im_size(x, data_format=self.data_format)
        x = self.pool(x)
        # GlobalAveragePooling2D returns (batch, channels) for both formats;
        # re-insert two singleton spatial dims at the positions the data format
        # requires: channels_first -> (B, C, 1, 1), channels_last -> (B, 1, 1, C).
        axis = -1 if is_channels_first(self.data_format) else 1
        x = tf.expand_dims(tf.expand_dims(x, axis=axis), axis=axis)
        x = self.conv(x, training=training)
        x = self.up(x, size=in_size)
        return x
class APN(nn.Layer):
    """
    Attention pyramid network block: the main 1x1 projection is modulated by a
    single-channel attention map produced by an hourglass, plus a global
    pooling-branch context.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(APN, self).__init__(**kwargs)
        self.in_size = in_size
        # The attention map produced by the hourglass has a single channel.
        att_out_channels = 1
        self.pool_branch = PoolingBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            bn_eps=bn_eps,
            in_size=in_size,
            data_format=data_format,
            name="pool_branch")
        self.body = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="body")
        # Hourglass encoder: three stride-2 downscales with shrinking kernels
        # (7x7, 5x5, then a pair of 3x3 convs at the bottom).
        down_seq = SimpleSequential(name="down_seq")
        down_seq.add(conv7x7_block(
            in_channels=in_channels,
            out_channels=att_out_channels,
            strides=2,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="down1"))
        down_seq.add(conv5x5_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            strides=2,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="down2"))
        down3_subseq = SimpleSequential(name="down3")
        down3_subseq.add(conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            strides=2,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1"))
        down3_subseq.add(conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv2"))
        down_seq.add(down3_subseq)
        # Hourglass decoder: three 2x upsamplings mirroring the downscales.
        up_seq = SimpleSequential(name="up_seq")
        up_seq.add(InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up1"))
        up_seq.add(InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up2"))
        up_seq.add(InterpolationBlock(
            scale_factor=2,
            data_format=data_format,
            name="up3"))
        # Skip connections between matching hourglass levels.
        # NOTE(review): BreakBlock at the top level presumably disables that
        # skip -- confirm against its definition in `common`.
        skip_seq = SimpleSequential(name="skip_seq")
        skip_seq.add(BreakBlock(name="skip1"))
        skip_seq.add(conv7x7_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="skip2"))
        skip_seq.add(conv5x5_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            use_bias=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="skip3"))
        self.hg = Hourglass(
            down_seq=down_seq,
            up_seq=up_seq,
            skip_seq=skip_seq,
            data_format=data_format,
            name="hg")
    def call(self, x, training=None):
        """
        Compute attention map w and global context y, then return body(x)*w + y.
        """
        y = self.pool_branch(x, training=training)
        w = self.hg(x, training=training)
        x = self.body(x, training=training)
        # Modulate the projected features by the attention map and add the
        # global-context branch.
        x = x * w
        x = x + y
        return x
class LEDNet(tf.keras.Model):
    """
    LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1905.02423.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    dilations : list of int
        Dilations for units.
    dropout_rates : list of list of int
        Dropout rates for each unit in encoder.
    correct_size_mismatch : bool
        Whether to correct downscaled sizes of images in encoder.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 dilations,
                 dropout_rates,
                 correct_size_mismatch=False,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(LEDNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Encoder downsamples by 8x overall, so the input must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        # NOTE(review): `data_format` is forwarded to submodules but not stored
        # as an attribute, yet `get_lednet` reads `net.data_format` when
        # loading pretrained weights -- confirm/fix that call site.
        self.encoder = SimpleSequential(name="encoder")
        for i, dilations_per_stage in enumerate(dilations):
            out_channels = channels[i]
            dropout_rate = dropout_rates[i]
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, dilation in enumerate(dilations_per_stage):
                # The first unit of each stage is a downscale block; its
                # dilation entry (always 0 in the configs) is unused.
                if j == 0:
                    stage.add(LEDDownBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        correct_size_mismatch=correct_size_mismatch,
                        bn_eps=bn_eps,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                    in_channels = out_channels
                else:
                    stage.add(LEDUnit(
                        channels=in_channels,
                        dilation=dilation,
                        dropout_rate=dropout_rate,
                        bn_eps=bn_eps,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
            self.encoder.add(stage)
        # Attention pyramid head operating at 1/8 resolution; it directly
        # produces per-class score maps.
        self.apn = APN(
            in_channels=in_channels,
            out_channels=classes,
            bn_eps=bn_eps,
            in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None,
            data_format=data_format,
            name="apn")
        # Restore full resolution with a single 8x upsampling.
        self.up = InterpolationBlock(
            scale_factor=8,
            data_format=data_format,
            name="up")
    def call(self, x, training=None):
        """
        Encoder -> APN head -> 8x upsampling to per-class score maps.
        """
        x = self.encoder(x, training=training)
        x = self.apn(x, training=training)
        x = self.up(x, training=training)
        return x
def get_lednet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create LEDNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    channels = [32, 64, 128]
    # First entry of each stage (0) corresponds to the downscale unit, for
    # which the dilation value is unused.
    dilations = [[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]]
    dropout_rates = [0.03, 0.03, 0.3]
    bn_eps = 1e-3
    net = LEDNet(
        channels=channels,
        dilations=dilations,
        dropout_rates=dropout_rates,
        bn_eps=bn_eps,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Bug fix: LEDNet does not store `data_format` as an attribute, so the
        # original `net.data_format` raised AttributeError; read the value
        # from kwargs instead (same default as LEDNet.__init__).
        data_format = kwargs.get("data_format", "channels_last")
        input_shape = (1,) + (in_channels,) + net.in_size if data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            by_name=True,
            skip_mismatch=True)
    return net
def lednet_cityscapes(classes=19, **kwargs):
    """
    Build LEDNet for the Cityscapes dataset ('LEDNet: A Lightweight
    Encoder-Decoder Network for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1905.02423).
    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    **kwargs
        Further options (e.g. `pretrained`, `root`) are forwarded to
        `get_lednet` unchanged.
    """
    return get_lednet(model_name="lednet_cityscapes", classes=classes, **kwargs)
def _test():
    """Smoke-test: build each model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    fixed_size = True
    correct_size_mismatch = False
    in_size = (1024, 2048)
    classes = 19
    for model in (lednet_cityscapes,):
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size,
                    correct_size_mismatch=correct_size_mismatch, data_format=data_format)
        batch = 4
        if is_channels_first(data_format):
            x_shape = (batch, 3, in_size[0], in_size[1])
            y_shape = (batch, classes, in_size[0], in_size[1])
        else:
            x_shape = (batch, in_size[0], in_size[1], 3)
            y_shape = (batch, in_size[0], in_size[1], classes)
        y = net(tf.random.normal(x_shape))
        assert tuple(y.shape.as_list()) == y_shape
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != lednet_cityscapes or weight_count == 922821)
if __name__ == "__main__":
_test()
| 22,964 | 31.94835 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ibndensenet.py | """
IBN-DenseNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNDenseNet', 'ibn_densenet121', 'ibn_densenet161', 'ibn_densenet169', 'ibn_densenet201']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, BatchNorm, pre_conv3x3_block, IBN, SimpleSequential, flatten, is_channels_first,\
get_channel_axis
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class IBNPreConvBlock(nn.Layer):
    """
    IBN-Net specific convolution block with BN/IBN normalization and ReLU pre-activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_ibn=False,
                 return_preact=False,
                 data_format="channels_last",
                 **kwargs):
        super(IBNPreConvBlock, self).__init__(**kwargs)
        self.use_ibn = use_ibn
        self.return_preact = return_preact
        if self.use_ibn:
            # Instance-Batch Normalization with a 60%/40% channel split;
            # exact split semantics are defined by `IBN` in `common`.
            self.ibn = IBN(
                channels=in_channels,
                first_fraction=0.6,
                inst_first=False,
                data_format=data_format,
                name="ibn")
        else:
            self.bn = BatchNorm(
                data_format=data_format,
                name="bn")
        self.activ = nn.ReLU()
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=False,
            data_format=data_format,
            name="conv")
    def call(self, x, training=None):
        """
        Pre-activation: normalize (IBN or BN) -> ReLU -> convolve. When
        `return_preact` is set, also return the post-ReLU tensor.
        """
        if self.use_ibn:
            x = self.ibn(x, training=training)
        else:
            x = self.bn(x, training=training)
        x = self.activ(x)
        if self.return_preact:
            x_pre_activ = x
        x = self.conv(x, training=training)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def ibn_pre_conv1x1_block(in_channels,
                          out_channels,
                          strides=1,
                          use_ibn=False,
                          return_preact=False,
                          data_format="channels_last",
                          **kwargs):
    """
    Build the 1x1 variant of the IBN-Net specific pre-activated convolution
    block (kernel_size fixed to 1, padding to 0).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    return_preact : bool, default False
        Whether return pre-activation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return IBNPreConvBlock(
        in_channels=in_channels, out_channels=out_channels, kernel_size=1,
        strides=strides, padding=0, use_ibn=use_ibn,
        return_preact=return_preact, data_format=data_format, **kwargs)
class IBNDenseUnit(nn.Layer):
    """
    IBN-DenseNet unit: a bottlenecked pair of convolutions whose output is
    concatenated with the input (dense connectivity).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 conv1_ibn,
                 data_format="channels_last",
                 **kwargs):
        super(IBNDenseUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_dropout = (dropout_rate != 0.0)
        # DenseNet bottleneck factor: the 1x1 conv widens to 4x the growth.
        bn_size = 4
        # inc_channels is the growth (new channels added by this unit).
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size
        self.conv1 = ibn_pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_ibn=conv1_ibn,
            data_format=data_format,
            name="conv1")
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=inc_channels,
            data_format=data_format,
            name="conv2")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")
    def call(self, x, training=None):
        """
        Compute new features and concatenate them with the input along the
        channel axis (dense connectivity).
        """
        identity = x
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        x = tf.concat([identity, x], axis=get_channel_axis(self.data_format))
        return x
class IBNDenseNet(tf.keras.Model):
    """
    IBN-DenseNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IBNDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            # Every stage except the first begins with a transition block
            # that halves the channel count.
            if i != 0:
                stage.add(TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2),
                    data_format=data_format,
                    name="trans{}".format(i + 1)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                # IBN is applied in the first conv of every third unit, and
                # never in the last stage.
                conv1_ibn = (i < 3) and (j % 3 == 0)
                stage.add(IBNDenseUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    conv1_ibn=conv1_ibn,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): named `output1` presumably to avoid clashing with the
        # reserved Keras `output` attribute -- confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """
        Feature extractor -> flatten -> classifier logits of shape (batch, classes).
        """
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_ibndensenet(num_layers,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create IBN-DenseNet model with specific parameters.
    Parameters:
    ----------
    num_layers : int
        Number of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # (init_block_channels, growth_rate, units per dense stage) per depth.
    configs = {
        121: (64, 32, [6, 12, 24, 16]),
        161: (96, 48, [6, 12, 36, 24]),
        169: (64, 32, [6, 12, 32, 32]),
        201: (64, 32, [6, 12, 48, 32]),
    }
    if num_layers not in configs:
        raise ValueError("Unsupported IBN-DenseNet version with number of layers {}".format(num_layers))
    init_block_channels, growth_rate, layers = configs[num_layers]
    # Per-unit output channels. Each stage starts from half of the previous
    # stage's final width (the transition block halves channels) and grows by
    # `growth_rate` per unit. This explicit loop replaces the original nested
    # `functools.reduce` expression with equivalent, readable code.
    channels = []
    prev_channels = init_block_channels * 2
    for num_units in layers:
        stage_channels = []
        unit_channels = prev_channels // 2
        for _ in range(num_units):
            unit_channels += growth_rate
            stage_channels.append(unit_channels)
        channels.append(stage_channels)
        prev_channels = stage_channels[-1]
    net = IBNDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def ibn_densenet121(**kwargs):
    """
    Build the 121-layer IBN-DenseNet ('Two at Once: Enhancing Learning and
    Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441).
    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    `get_ibndensenet` unchanged.
    """
    return get_ibndensenet(model_name="ibn_densenet121", num_layers=121, **kwargs)
def ibn_densenet161(**kwargs):
    """
    Build the 161-layer IBN-DenseNet ('Two at Once: Enhancing Learning and
    Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441).
    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    `get_ibndensenet` unchanged.
    """
    return get_ibndensenet(model_name="ibn_densenet161", num_layers=161, **kwargs)
def ibn_densenet169(**kwargs):
    """
    Build the 169-layer IBN-DenseNet ('Two at Once: Enhancing Learning and
    Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441).
    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    `get_ibndensenet` unchanged.
    """
    return get_ibndensenet(model_name="ibn_densenet169", num_layers=169, **kwargs)
def ibn_densenet201(**kwargs):
    """
    Build the 201-layer IBN-DenseNet ('Two at Once: Enhancing Learning and
    Generalization Capacities via IBN-Net,' https://arxiv.org/abs/1807.09441).
    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    `get_ibndensenet` unchanged.
    """
    return get_ibndensenet(model_name="ibn_densenet201", num_layers=201, **kwargs)
def _test():
    """Smoke-test: build each model, check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    expected_weight_counts = {
        ibn_densenet121: 7978856,
        ibn_densenet161: 28681000,
        ibn_densenet169: 14149480,
        ibn_densenet201: 20013928,
    }
    for model in (ibn_densenet121, ibn_densenet161, ibn_densenet169, ibn_densenet201):
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch, 1000)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_weight_counts[model]
if __name__ == "__main__":
_test()
| 14,434 | 32.414352 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/hardnet.py | """
HarDNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
"""
__all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block, MaxPool2d, SimpleSequential,\
flatten, get_channel_axis, is_channels_first
class InvDwsConvBlock(nn.Layer):
    """
    Inverse depthwise separable convolution block with BatchNorms and activations at each convolution layers.
    "Inverse" means the pointwise (1x1) convolution is applied BEFORE the
    depthwise convolution (a standard depthwise-separable block does the
    opposite).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 pw_activation="relu",
                 dw_activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(InvDwsConvBlock, self).__init__(**kwargs)
        # Pointwise 1x1 convolution changes the channel count first.
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=pw_activation,
            data_format=data_format,
            name="pw_conv")
        # Depthwise convolution then applies the spatial filtering.
        self.dw_conv = dwconv_block(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=dw_activation,
            data_format=data_format,
            name="dw_conv")
    def call(self, x, training=None):
        """
        Apply the pointwise convolution, then the depthwise convolution.
        """
        x = self.pw_conv(x, training=training)
        x = self.dw_conv(x, training=training)
        return x
def invdwsconv3x3_block(in_channels,
                        out_channels,
                        strides=1,
                        padding=1,
                        dilation=1,
                        use_bias=False,
                        use_bn=True,
                        bn_eps=1e-5,
                        pw_activation="relu",
                        dw_activation="relu",
                        data_format="channels_last",
                        **kwargs):
    """
    3x3 inverse depthwise separable version of the standard convolution block
    (pointwise 1x1 convolution first, then a 3x3 depthwise convolution).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return InvDwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        pw_activation=pw_activation,
        dw_activation=dw_activation,
        data_format=data_format,
        **kwargs)
class HarDUnit(nn.Layer):
    """
    HarDNet unit: a harmonic-dense block of convolutions where each layer's
    input is a concatenation of selected earlier outputs (given by `links_list`),
    followed by a 1x1 transition convolution and an optional downscale.
    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels for each block.
    out_channels_list : list of int
        Number of output channels for each block.
    links_list : list of list of int
        List of indices for each layer.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_dropout : bool
        Whether to use dropout module.
    downsampling : bool
        Whether to downsample input.
    activation : str
        Name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 links_list,
                 use_deptwise,
                 use_dropout,
                 downsampling,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(HarDUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.links_list = links_list
        self.use_dropout = use_dropout
        self.downsampling = downsampling
        # One conv block per entry in links_list; the depthwise-separable
        # variant is used by the "DS" models.
        self.blocks = SimpleSequential(name="blocks")
        for i in range(len(links_list)):
            in_channels = in_channels_list[i]
            out_channels = out_channels_list[i]
            if use_deptwise:
                unit = invdwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    pw_activation=activation,
                    dw_activation=None,
                    data_format=data_format,
                    name="block{}".format(i + 1))
            else:
                unit = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    data_format=data_format,
                    name="block{}".format(i + 1))
            self.blocks.add(unit)
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=0.1,
                name="dropout")
        # 1x1 transition conv applied to the concatenated selected outputs.
        self.conv = conv1x1_block(
            in_channels=in_channels_list[-1],
            out_channels=out_channels_list[-1],
            activation=activation,
            data_format=data_format,
            name="conv")
        if self.downsampling:
            if use_deptwise:
                self.downsample = dwconv3x3_block(
                    in_channels=out_channels_list[-1],
                    out_channels=out_channels_list[-1],
                    strides=2,
                    activation=None,
                    data_format=data_format,
                    name="downsample")
            else:
                self.downsample = MaxPool2d(
                    pool_size=2,
                    strides=2,
                    data_format=data_format,
                    name="downsample")
    def call(self, x, training=None):
        """
        Run the harmonic-dense block, then concat selected outputs, transition
        conv, and optional downsampling.
        """
        axis = get_channel_axis(self.data_format)
        # layer_outs[0] is the block input; each conv block appends its output.
        layer_outs = [x]
        for links_i, layer_i in zip(self.links_list, self.blocks.children):
            # Gather the earlier outputs this layer is "linked" to and
            # concatenate them as its input (harmonic dense connectivity).
            layer_in = []
            for idx_ij in links_i:
                layer_in.append(layer_outs[idx_ij])
            if len(layer_in) > 1:
                x = tf.concat(layer_in, axis=axis)
            else:
                x = layer_in[0]
            out = layer_i(x, training=training)
            layer_outs.append(out)
        # Keep the last output plus every odd-indexed output for the final
        # concatenation; even-indexed intermediates are dropped.
        outs = []
        for i, layer_out_i in enumerate(layer_outs):
            if (i == len(layer_outs) - 1) or (i % 2 == 1):
                outs.append(layer_out_i)
        x = tf.concat(outs, axis=axis)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        x = self.conv(x, training=training)
        if self.downsampling:
            x = self.downsample(x, training=training)
        return x
class HarDInitBlock(nn.Layer):
    """
    HarDNet specific initial block: two stride-2 reductions (conv + pool/dwconv) that shrink the
    input resolution by 4x before the main stages.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    activation : str
        Name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_deptwise,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(HarDInitBlock, self).__init__(**kwargs)
        mid_channels = out_channels // 2

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            activation=activation,
            data_format=data_format,
            name="conv1")
        # The depthwise-separable ("DS") variants replace the second 3x3 conv
        # with a cheaper pointwise conv and pool with a depthwise conv instead.
        if use_deptwise:
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                activation=activation,
                data_format=data_format,
                name="conv2")
            self.downsample = dwconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=2,
                activation=None,
                data_format=data_format,
                name="downsample")
        else:
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                activation=activation,
                data_format=data_format,
                name="conv2")
            self.downsample = MaxPool2d(
                pool_size=3,
                strides=2,
                padding=1,
                data_format=data_format,
                name="downsample")

    def call(self, x, training=None):
        # Apply the three stages in sequence, propagating the training flag.
        for stage in (self.conv1, self.conv2, self.downsample):
            x = stage(x, training=training)
        return x
class HarDNet(tf.keras.Model):
    """
    HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    init_block_channels : int
        Number of output channels for the initial unit.
    unit_in_channels : list of list of list of int
        Number of input channels for each layer in each stage.
    unit_out_channels : list of list of list of int
        Number of output channels for each layer in each stage.
    unit_links : list of list of list of int
        List of indices for each layer in each stage.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_last_dropout : bool
        Whether to use dropouts in the last unit.
    output_dropout_rate : float
        Parameter of Dropout layer before classifier. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 init_block_channels,
                 unit_in_channels,
                 unit_out_channels,
                 unit_links,
                 use_deptwise,
                 use_last_dropout,
                 output_dropout_rate,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(HarDNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        activation = "relu6"

        self.features = SimpleSequential(name="features")
        self.features.add(HarDInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            use_deptwise=use_deptwise,
            activation=activation,
            data_format=data_format,
            name="init_block"))
        for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i,
                                                                                out_channels_list_i)):
                # Dropout only in the very last unit (when enabled); downsampling
                # after every stage except the final one.
                use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
                               use_last_dropout)
                downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
                stage.add(HarDUnit(
                    in_channels_list=in_channels_list_ij,
                    out_channels_list=out_channels_list_ij,
                    links_list=unit_links[i][j],
                    use_deptwise=use_deptwise,
                    use_dropout=use_dropout,
                    downsampling=downsampling,
                    activation=activation,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
            self.features.add(stage)
        in_channels = unit_out_channels[-1][-1][-1]
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=output_dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        # Propagate the training flag so the output dropout is active only in
        # training mode (previously the flag was silently dropped here).
        x = self.output1(x, training=training)
        return x
def get_hardnet(blocks,
                use_deptwise=True,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create HarDNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (39, 68 or 85).
    use_deptwise : bool, default True
        Whether to use depthwise separable version of the model.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Architecture hyperparameters for the three published HarDNet depths.
    if blocks == 39:
        init_block_channels = 48
        growth_factor = 1.6
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [4, 16, 8, 4]
        channels_per_layers = [96, 320, 640, 1024]
        growth_rates = [16, 20, 64, 160]
        downsamples = [1, 1, 1, 0]
        use_dropout = False
    elif blocks == 68:
        init_block_channels = 64
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [8, 16, 16, 16, 4]
        channels_per_layers = [128, 256, 320, 640, 1024]
        growth_rates = [14, 16, 20, 40, 160]
        downsamples = [1, 0, 1, 1, 0]
        use_dropout = False
    elif blocks == 85:
        init_block_channels = 96
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.2
        layers = [8, 16, 16, 16, 16, 4]
        channels_per_layers = [192, 256, 320, 480, 720, 1280]
        growth_rates = [24, 24, 28, 36, 48, 256]
        downsamples = [1, 0, 1, 0, 1, 0]
        use_dropout = True
    else:
        raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks))

    assert (downsamples[-1] == 0)

    # Derive per-stage channel counts and harmonic link lists from the config above.
    def calc_stage_params():

        # Per-unit (per-layer) channel counts and link lists within each stage.
        def calc_unit_params():

            # Recursively compute one layer's output/input channels and its links,
            # following the HarDNet harmonic connectivity rule (power-of-two skips).
            def calc_blocks_params(layer_idx,
                                   base_channels,
                                   growth_rate):
                if layer_idx == 0:
                    return base_channels, 0, []
                out_channels_ij = growth_rate
                links_ij = []
                for k in range(10):
                    dv = 2 ** k
                    if layer_idx % dv == 0:
                        t = layer_idx - dv
                        links_ij.append(t)
                        if k > 0:
                            out_channels_ij *= growth_factor
                # Round to the nearest even channel count.
                out_channels_ij = int(int(out_channels_ij + 1) / 2) * 2
                in_channels_ij = 0
                for t in links_ij:
                    out_channels_ik, _, _ = calc_blocks_params(
                        layer_idx=t,
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    in_channels_ij += out_channels_ik
                return out_channels_ij, in_channels_ij, links_ij

            unit_out_channels = []
            unit_in_channels = []
            unit_links = []
            for num_layers, growth_rate, base_channels, channels_per_layers_i in zip(
                    layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers):
                stage_out_channels_i = 0
                unit_out_channels_i = []
                unit_in_channels_i = []
                unit_links_i = []
                for j in range(num_layers):
                    out_channels_ij, in_channels_ij, links_ij = calc_blocks_params(
                        layer_idx=(j + 1),
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    unit_out_channels_i.append(out_channels_ij)
                    unit_in_channels_i.append(in_channels_ij)
                    unit_links_i.append(links_ij)
                    # Only odd layers plus the last one feed the transition conv.
                    if (j % 2 == 0) or (j == num_layers - 1):
                        stage_out_channels_i += out_channels_ij
                unit_in_channels_i.append(stage_out_channels_i)
                unit_out_channels_i.append(channels_per_layers_i)
                unit_out_channels.append(unit_out_channels_i)
                unit_in_channels.append(unit_in_channels_i)
                unit_links.append(unit_links_i)
            return unit_out_channels, unit_in_channels, unit_links

        unit_out_channels, unit_in_channels, unit_links = calc_unit_params()

        # Group consecutive units into stages, cutting after each downsample point.
        stage_out_channels = []
        stage_in_channels = []
        stage_links = []
        stage_out_channels_k = None
        for i in range(len(layers)):
            if stage_out_channels_k is None:
                stage_out_channels_k = []
                stage_in_channels_k = []
                stage_links_k = []
            stage_out_channels_k.append(unit_out_channels[i])
            stage_in_channels_k.append(unit_in_channels[i])
            stage_links_k.append(unit_links[i])
            if (downsamples[i] == 1) or (i == len(layers) - 1):
                stage_out_channels.append(stage_out_channels_k)
                stage_in_channels.append(stage_in_channels_k)
                stage_links.append(stage_links_k)
                stage_out_channels_k = None
        return stage_out_channels, stage_in_channels, stage_links

    stage_out_channels, stage_in_channels, stage_links = calc_stage_params()

    net = HarDNet(
        init_block_channels=init_block_channels,
        unit_in_channels=stage_in_channels,
        unit_out_channels=stage_out_channels,
        unit_links=stage_links,
        use_deptwise=use_deptwise,
        use_last_dropout=use_dropout,
        output_dropout_rate=dropout_rate,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def hardnet39ds(**kwargs):
    """
    HarDNet-39DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=39, use_deptwise=True, model_name="hardnet39ds")
    return get_hardnet(**config, **kwargs)
def hardnet68ds(**kwargs):
    """
    HarDNet-68DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=68, use_deptwise=True, model_name="hardnet68ds")
    return get_hardnet(**config, **kwargs)
def hardnet68(**kwargs):
    """
    HarDNet-68 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=68, use_deptwise=False, model_name="hardnet68")
    return get_hardnet(**config, **kwargs)
def hardnet85(**kwargs):
    """
    HarDNet-85 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=85, use_deptwise=False, model_name="hardnet85")
    return get_hardnet(**config, **kwargs)
def _test():
    """Smoke-test every HarDNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # Expected trainable weight counts per model variant.
    expected_counts = {
        hardnet39ds: 3488228,
        hardnet68ds: 4180602,
        hardnet68: 17565348,
        hardnet85: 36670212,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)


if __name__ == "__main__":
    _test()
| 24,226 | 35.213752 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/sinet.py | """
SINet for image segmentation, implemented in TensorFlow.
Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
"""
__all__ = ['SINet', 'sinet_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import PReLU2, BatchNorm, AvgPool2d, conv1x1, get_activation_layer, conv1x1_block, conv3x3_block,\
round_channels, dwconv_block, InterpolationBlock, ChannelShuffle, SimpleSequential, Concurrent, get_channel_axis,\
is_channels_first
class SEBlock(nn.Layer):
    """
    SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    mid_activation : function, or str, or nn.Module, default 'relu'
        Activation function after the first fully-connected layer.
    out_activation : function, or str, or nn.Module, default 'sigmoid'
        Activation function after the last fully-connected layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation="relu",
                 out_activation="sigmoid",
                 data_format="channels_last",
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # With reduction == 1 the second FC layer would be an identity-sized
        # mapping, so the mid activation / second FC pair is skipped.
        self.use_conv2 = (reduction > 1)
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.fc1 = nn.Dense(
            units=mid_channels,
            input_dim=channels,
            name="fc1")
        if self.use_conv2:
            self.activ = get_activation_layer(mid_activation, name="activ")
            self.fc2 = nn.Dense(
                units=channels,
                input_dim=mid_channels,
                name="fc2")
        self.sigmoid = get_activation_layer(out_activation, name="sigmoid")

    def call(self, x, training=None):
        w = self.pool(x)
        w = self.fc1(w)
        if self.use_conv2:
            w = self.activ(w)
            w = self.fc2(w)
        w = self.sigmoid(w)
        # After global pooling, w has shape (N, C).  Expand it twice so it
        # broadcasts against x: (N, C, 1, 1) for channels_first (expand at -1),
        # (N, 1, 1, C) for channels_last (expand at 1).
        axis = -1 if is_channels_first(self.data_format) else 1
        w = tf.expand_dims(tf.expand_dims(w, axis=axis), axis=axis)
        x = x * w
        return x
class DwsConvBlock(nn.Layer):
    """
    SINet version of depthwise separable convolution block with BatchNorms and activations at each convolution layers,
    and an optional squeeze-and-excitation step between them.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation="relu",
                 pw_activation="relu",
                 se_reduction=0,
                 data_format="channels_last",
                 **kwargs):
        super(DwsConvBlock, self).__init__(**kwargs)
        self.use_se = (se_reduction > 0)
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            use_bias=use_bias,
            use_bn=dw_use_bn,
            bn_eps=bn_eps,
            activation=dw_activation,
            data_format=data_format,
            name="dw_conv")
        if self.use_se:
            # SINet's SE variant uses PReLU activations instead of relu/sigmoid.
            self.se = SEBlock(
                channels=in_channels,
                reduction=se_reduction,
                round_mid=False,
                mid_activation=(lambda: PReLU2(in_channels // se_reduction, data_format=data_format, name="activ")),
                out_activation=(lambda: PReLU2(in_channels, data_format=data_format, name="sigmoid")),
                data_format=data_format,
                name="se")
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=pw_use_bn,
            bn_eps=bn_eps,
            activation=pw_activation,
            data_format=data_format,
            name="pw_conv")

    def call(self, x, training=None):
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.dw_conv(x, training=training)
        if self.use_se:
            x = self.se(x, training=training)
        x = self.pw_conv(x, training=training)
        return x
def dwsconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     dw_use_bn=True,
                     pw_use_bn=True,
                     bn_eps=1e-5,
                     dw_activation="relu",
                     pw_activation="relu",
                     se_reduction=0,
                     data_format="channels_last",
                     **kwargs):
    """
    Build a 3x3 depthwise separable version of the standard convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default 'relu'
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default 'relu'
        Activation function after the pointwise convolution block.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    block_args = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        dw_use_bn=dw_use_bn,
        pw_use_bn=pw_use_bn,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation,
        se_reduction=se_reduction,
        data_format=data_format)
    return DwsConvBlock(**block_args, **kwargs)
def dwconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    bn_eps=1e-5,
                    activation="relu",
                    data_format="channels_last",
                    **kwargs):
    """
    Build a 3x3 depthwise version of the standard convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    block_args = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format)
    return dwconv_block(**block_args, **kwargs)
class FDWConvBlock(nn.Layer):
    """
    Factorized depthwise separable convolution block with BatchNorms and activations at each convolution layers.
    A kxk depthwise conv is factorized into a (k, 1) and a (1, k) depthwise conv whose outputs are summed.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function applied once, after summing the two branches.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(FDWConvBlock, self).__init__(**kwargs)
        assert use_bn
        self.activate = (activation is not None)

        # Vertical (k, 1) branch; no activation — applied after the sum.
        self.v_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(kernel_size, 1),
            strides=strides,
            padding=(padding, 0),
            dilation=dilation,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="v_conv")
        # Horizontal (1, k) branch.
        self.h_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(1, kernel_size),
            strides=strides,
            padding=(0, padding),
            dilation=dilation,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="h_conv")
        if self.activate:
            self.act = get_activation_layer(activation, name="act")

    def call(self, x, training=None):
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.v_conv(x, training=training) + self.h_conv(x, training=training)
        if self.activate:
            x = self.act(x)
        return x
def fdwconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation="relu",
                     data_format="channels_last",
                     **kwargs):
    """
    Build a 3x3 factorized depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    block_args = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format)
    return FDWConvBlock(**block_args, **kwargs)
def fdwconv5x5_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=2,
                     dilation=1,
                     use_bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation="relu",
                     data_format="channels_last",
                     **kwargs):
    """
    Build a 5x5 factorized depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Fix the kernel size at 5 and forward everything else unchanged.
    block_args = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format)
    return FDWConvBlock(**block_args, **kwargs)
class SBBlock(nn.Layer):
    """
    SB-block: optional downscale, a (factorized) depthwise conv, a pointwise conv,
    optional upscale back to the input size, and a final BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size for a factorized depthwise separable convolution block.
    scale_factor : int
        Scale factor.
    size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 scale_factor,
                 size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBBlock, self).__init__(**kwargs)
        self.use_scale = (scale_factor > 1)

        if self.use_scale:
            self.down_scale = AvgPool2d(
                pool_size=scale_factor,
                strides=scale_factor,
                data_format=data_format,
                name="down_scale")
            self.up_scale = InterpolationBlock(
                scale_factor=scale_factor,
                out_size=size,
                data_format=data_format,
                name="up_scale")

        # NOTE(review): scale_factor appears to always be >= 1 here, so this
        # condition always selects the factorized branch and the plain dwconv
        # fallback looks unreachable — confirm the intended condition.
        use_fdw = (scale_factor > 0)
        if use_fdw:
            fdwconv3x3_class = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block
            self.conv1 = fdwconv3x3_class(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_eps=bn_eps,
                activation=(lambda: PReLU2(in_channels, data_format=data_format, name="activ")),
                data_format=data_format,
                name="conv1")
        else:
            self.conv1 = dwconv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_eps=bn_eps,
                activation=(lambda: PReLU2(in_channels, data_format=data_format, name="activ")),
                data_format=data_format,
                name="conv1")

        self.conv2 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")

        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")

    def call(self, x, training=None):
        if self.use_scale:
            x = self.down_scale(x)
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_scale:
            x = self.up_scale(x)
        x = self.bn(x, training=training)
        return x
class PreActivation(nn.Layer):
    """
    PreResNet like pure pre-activation block without convolution layer (BatchNorm + PReLU).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 bn_eps=1e-5,
                 data_format="channels_last",
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        assert (in_channels is not None)
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")
        self.activ = PReLU2(in_channels, data_format=data_format, name="activ")

    def call(self, x, training=None):
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.bn(x, training=training)
        x = self.activ(x)
        return x
class ESPBlock(nn.Layer):
    """
    ESP block, which is based on the following principle: Reduce ---> Split ---> Transform --> Merge.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_sizes : list of int
        Convolution window size for branches.
    scale_factors : list of int
        Scale factor for branches.
    use_residual : bool
        Whether to use residual connection.
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 scale_factors,
                 use_residual,
                 in_size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        self.use_residual = use_residual
        groups = len(kernel_sizes)

        # Split out_channels evenly across branches; the first branch absorbs
        # any remainder so the concatenated output has exactly out_channels.
        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels

        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="conv")

        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="c_shuffle")

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        for i in range(groups):
            out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels
            self.branches.add(SBBlock(
                in_channels=mid_channels,
                out_channels=out_channels_i,
                kernel_size=kernel_sizes[i],
                scale_factor=scale_factors[i],
                size=in_size,
                bn_eps=bn_eps,
                data_format=data_format,
                name="branch{}".format(i + 1)))

        self.preactiv = PreActivation(
            in_channels=out_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="preactiv")

    def call(self, x, training=None):
        if self.use_residual:
            identity = x

        x = self.conv(x)
        x = self.c_shuffle(x)
        # Propagate the training flag (previously hard-coded to None, which
        # prevented the branch BatchNorms from switching train/inference modes).
        x = self.branches(x, training=training)

        if self.use_residual:
            x = identity + x

        x = self.preactiv(x, training=training)
        return x
class SBStage(nn.Layer):
    """
    SB stage: a downscaling conv followed by a chain of ESP blocks, whose output is
    concatenated with the downscaled input before a pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    down_channels : int
        Number of output channels for a downscale block.
    channels_list : list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of int
        Convolution window size for branches.
    scale_factors_list : list of int
        Scale factor for branches.
    use_residual_list : list of int
        List of flags for using residual in each ESP-block.
    se_reduction : int
        Squeeze reduction value (0 means no-se).
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 down_channels,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 se_reduction,
                 in_size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBStage, self).__init__(**kwargs)
        self.data_format = data_format

        self.down_conv = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=down_channels,
            strides=2,
            dw_use_bn=False,
            bn_eps=bn_eps,
            dw_activation=None,
            pw_activation=(lambda: PReLU2(down_channels, data_format=data_format, name="activ")),
            se_reduction=se_reduction,
            data_format=data_format,
            name="down_conv")
        in_channels = down_channels

        self.main_branch = SimpleSequential(name="main_branch")
        for i, out_channels in enumerate(channels_list):
            use_residual = (use_residual_list[i] == 1)
            kernel_sizes = kernel_sizes_list[i]
            scale_factors = scale_factors_list[i]
            self.main_branch.add(ESPBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_sizes=kernel_sizes,
                scale_factors=scale_factors,
                use_residual=use_residual,
                # The downscale conv halves spatial size, so ESP blocks see in_size // 2.
                in_size=((in_size[0] // 2, in_size[1] // 2) if in_size else None),
                bn_eps=bn_eps,
                data_format=data_format,
                name="block{}".format(i + 1)))
            in_channels = out_channels

        self.preactiv = PreActivation(
            in_channels=(down_channels + in_channels),
            bn_eps=bn_eps,
            data_format=data_format,
            name="preactiv")

    def call(self, x, training=None):
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.down_conv(x, training=training)
        y = self.main_branch(x, training=training)
        x = tf.concat([x, y], axis=get_channel_axis(self.data_format))
        x = self.preactiv(x, training=training)
        # Return both the fused tensor and the main-branch output (used by the decoder).
        return x, y
class SBEncoderInitBlock(nn.Layer):
    """
    SB encoder specific initial block: a stride-2 3x3 conv followed by a stride-2
    depthwise-separable conv (with SE), reducing spatial size by 4x.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBEncoderInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv1")
        self.conv2 = dwsconv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=2,
            dw_use_bn=False,
            bn_eps=bn_eps,
            dw_activation=None,
            pw_activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            se_reduction=1,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        # Propagate the training flag (previously hard-coded to None, which
        # prevented BatchNorm from switching between train/inference modes).
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class SBEncoder(nn.Layer):
    """
    SB encoder for SINet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    init_block_channels : list int
        Number of output channels for convolutions in the initial block.
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 init_block_channels,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 in_size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBEncoder, self).__init__(**kwargs)
        # Initial block downsamples 4x; stage1 and stage2 each halve again.
        self.init_block = SBEncoderInitBlock(
            in_channels=in_channels,
            mid_channels=init_block_channels[0],
            out_channels=init_block_channels[1],
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block")
        in_channels = init_block_channels[1]
        self.stage1 = SBStage(
            in_channels=in_channels,
            down_channels=down_channels_list[0],
            channels_list=channels_list[0],
            kernel_sizes_list=kernel_sizes_list[0],
            scale_factors_list=scale_factors_list[0],
            use_residual_list=use_residual_list[0],
            se_reduction=1,
            in_size=((in_size[0] // 4, in_size[1] // 4) if in_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="stage1")
        # Each SBStage concatenates its down-conv output with the branch output.
        in_channels = down_channels_list[0] + channels_list[0][-1]
        self.stage2 = SBStage(
            in_channels=in_channels,
            down_channels=down_channels_list[1],
            channels_list=channels_list[1],
            kernel_sizes_list=kernel_sizes_list[1],
            scale_factors_list=scale_factors_list[1],
            use_residual_list=use_residual_list[1],
            se_reduction=2,
            in_size=((in_size[0] // 8, in_size[1] // 8) if in_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="stage2")
        in_channels = down_channels_list[1] + channels_list[1][-1]
        self.output_conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="output")

    def call(self, x, training=None):
        # Bug fix: propagate `training` (was hard-coded to None), so batch-norm
        # layers behave correctly in training mode.
        y1 = self.init_block(x, training=training)
        x, y2 = self.stage1(y1, training=training)
        x, _ = self.stage2(x, training=training)
        x = self.output_conv(x)
        # Intermediate features y2 / y1 are returned for the decoder skips.
        return x, y2, y1
class SBDecodeBlock(nn.Layer):
    """
    SB decoder block for SINet.

    Parameters:
    ----------
    channels : int
        Number of output classes.
    out_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 out_size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBDecodeBlock, self).__init__(**kwargs)
        assert (channels is not None)
        self.data_format = data_format
        self.up = InterpolationBlock(
            scale_factor=2,
            out_size=out_size,
            data_format=data_format,
            name="up")
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")

    def call(self, x, y, training=None):
        x = self.up(x)
        # Bug fix: propagate `training` (was hard-coded to None).
        x = self.bn(x, training=training)
        # Information blocking: suppress the low-level feature map y where the
        # upsampled prediction x is already confident (high max class prob).
        # NOTE(review): tf.nn.softmax defaults to the last axis, which is the
        # channel axis only for 'channels_last' -- confirm intent for
        # 'channels_first' inputs.
        w_conf = tf.nn.softmax(x)
        axis = get_channel_axis(self.data_format)
        w_max = tf.broadcast_to(tf.expand_dims(tf.reduce_max(w_conf, axis=axis), axis=axis), shape=x.shape)
        x = y * (1 - w_max) + x
        return x
class SBDecoder(nn.Layer):
    """
    SB decoder for SINet.

    Parameters:
    ----------
    dim2 : int
        Size of dimension #2.
    classes : int
        Number of segmentation classes.
    out_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 dim2,
                 classes,
                 out_size,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(SBDecoder, self).__init__(**kwargs)
        self.decode1 = SBDecodeBlock(
            channels=classes,
            out_size=((out_size[0] // 8, out_size[1] // 8) if out_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="decode1")
        self.decode2 = SBDecodeBlock(
            channels=classes,
            out_size=((out_size[0] // 4, out_size[1] // 4) if out_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="decode2")
        # Projects the mid-level encoder feature (dim2 channels) to class space.
        self.conv3c = conv1x1_block(
            in_channels=dim2,
            out_channels=classes,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(classes, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv3c")
        self.output_conv = nn.Conv2DTranspose(
            filters=classes,
            kernel_size=2,
            strides=2,
            padding="valid",
            output_padding=0,
            use_bias=False,
            data_format=data_format,
            name="output_conv")
        self.up = InterpolationBlock(
            scale_factor=2,
            out_size=out_size,
            data_format=data_format,
            name="up")

    def call(self, y3, y2, y1, training=None):
        # Bug fix: propagate `training` (was hard-coded to None).
        y2 = self.conv3c(y2, training=training)
        x = self.decode1(y3, y2, training=training)
        x = self.decode2(x, y1, training=training)
        x = self.output_conv(x, training=training)
        x = self.up(x)
        return x
class SINet(tf.keras.Model):
    """
    SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
    Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    dim2 : int
        Size of dimension #2.
    bn_eps : float
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 dim2,
                 bn_eps,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=21,
                 data_format="channels_last",
                 **kwargs):
        super(SINet, self).__init__(**kwargs)
        assert (fixed_size is not None)
        assert (in_channels > 0)
        # Both spatial dims must be divisible by 64 for the down/up-sampling
        # factors used throughout the network.
        assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0))
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.aux = aux

        init_block_channels = [16, classes]
        out_channels = classes
        self.encoder = SBEncoder(
            in_channels=in_channels,
            out_channels=out_channels,
            init_block_channels=init_block_channels,
            down_channels_list=down_channels_list,
            channels_list=channels_list,
            kernel_sizes_list=kernel_sizes_list,
            scale_factors_list=scale_factors_list,
            use_residual_list=use_residual_list,
            in_size=(in_size if fixed_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="encoder")
        self.decoder = SBDecoder(
            dim2=dim2,
            classes=classes,
            out_size=(in_size if fixed_size else None),
            bn_eps=bn_eps,
            data_format=data_format,
            name="decoder")

    def call(self, x, training=None):
        # Bug fix: propagate `training` (was hard-coded to None), so the
        # encoder/decoder batch-norm layers train correctly.
        y3, y2, y1 = self.encoder(x, training=training)
        x = self.decoder(y3, y2, y1, training=training)
        if self.aux:
            # Auxiliary coarse encoder output for deep supervision.
            return x, y3
        else:
            return x
def get_sinet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SINet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    kernel_sizes_list = [
        [[3, 5], [3, 3], [3, 3]],
        [[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]]
    scale_factors_list = [
        [[1, 1], [0, 1], [0, 1]],
        [[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]]

    # Channel widths: dims == [24, 60, 84, 108] for chnn == 4.
    chnn = 4
    dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)]

    num_blocks1 = len(kernel_sizes_list[0])
    num_blocks2 = len(kernel_sizes_list[1])
    half2 = num_blocks2 // 2
    channels_list = [
        [dims[1]] * num_blocks1,
        ([dims[2]] * half2) + ([dims[3]] * (num_blocks2 - half2))]
    # First block of each group is non-residual; the rest are residual.
    use_residual_list = [
        [0] + ([1] * (num_blocks1 - 1)),
        [0] + ([1] * (half2 - 1)) + [0] + ([1] * (num_blocks2 - half2 - 1))]

    net = SINet(
        down_channels_list=[dims[0], dims[1]],
        channels_list=channels_list,
        kernel_sizes_list=kernel_sizes_list,
        scale_factors_list=scale_factors_list,
        use_residual_list=use_residual_list,
        dim2=dims[1],
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def sinet_cityscapes(classes=19, **kwargs):
    """
    SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze
    Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sinet(
        classes=classes,
        bn_eps=1e-3,
        model_name="sinet_cityscapes",
        **kwargs)
def _test():
    """Build SINet on random input and check output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (1024, 2048)
    aux = False
    fixed_size = False
    pretrained = False

    for model in [sinet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        ys = net(x)
        y = ys[0] if aux else ys

        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == 19) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == 19) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sinet_cityscapes or weight_count == 119418)
| 41,973 | 33.014587 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/shufflenetv2b.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow. The alternative version.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock, MaxPool2d,\
SimpleSequential, get_channel_axis, flatten
class ShuffleUnit(nn.Layer):
    """
    ShuffleNetV2(b) unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    shuffle_group_first : bool
        Whether to use channel shuffle in group first mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 shuffle_group_first,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        mid_channels = out_channels // 2
        in_channels2 = in_channels // 2
        assert (in_channels % 2 == 0)
        # Downsample units feed the full input to both branches; otherwise the
        # input is split in half between the shortcut and the conv branch.
        y2_in_channels = (in_channels if downsample else in_channels2)
        y2_out_channels = out_channels - y2_in_channels
        # Conv branch: 1x1 -> depthwise 3x3 (stride 2 on downsample) -> 1x1.
        self.conv1 = conv1x1_block(
            in_channels=y2_in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dconv = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=(2 if self.downsample else 1),
            activation=None,
            data_format=data_format,
            name="dconv")
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=y2_out_channels,
            data_format=data_format,
            name="conv2")
        if self.use_se:
            self.se = SEBlock(
                channels=y2_out_channels,
                data_format=data_format,
                name="se")
        if downsample:
            # Shortcut branch for downsample units: depthwise stride-2 + 1x1.
            self.shortcut_dconv = dwconv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=2,
                activation=None,
                data_format=data_format,
                name="shortcut_dconv")
            self.shortcut_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=in_channels,
                data_format=data_format,
                name="shortcut_conv")
        # Two shuffle flavors differ only in group-first vs channel-first order.
        if shuffle_group_first:
            self.c_shuffle = ChannelShuffle(
                channels=out_channels,
                groups=2,
                data_format=data_format,
                name="c_shuffle")
        else:
            self.c_shuffle = ChannelShuffle2(
                channels=out_channels,
                groups=2,
                data_format=data_format,
                name="c_shuffle")
    def call(self, x, training=None):
        if self.downsample:
            y1 = self.shortcut_dconv(x, training=training)
            y1 = self.shortcut_conv(y1, training=training)
            x2 = x
        else:
            # Split channels: y1 passes through untouched, x2 goes to the convs.
            y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(self.data_format))
        y2 = self.conv1(x2, training=training)
        y2 = self.dconv(y2, training=training)
        y2 = self.conv2(y2, training=training)
        if self.use_se:
            y2 = self.se(y2)
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        # Merge branches and shuffle so information mixes across the split.
        x = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        x = self.c_shuffle(x)
        return x
class ShuffleInitBlock(nn.Layer):
    """
    ShuffleNetV2(b) specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        # Stride-2 conv followed by stride-2 max-pool -> 4x spatial reduction.
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            ceil_mode=False,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        x = self.conv(x, training=training)
        x = self.pool(x)
        return x
class ShuffleNetV2b(tf.keras.Model):
    """
    ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 shuffle_group_first=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2b, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(ShuffleInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage downsamples spatially.
                downsample = (j == 0)
                stage.add(ShuffleUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=downsample,
                    use_se=use_se,
                    use_residual=use_residual,
                    shuffle_group_first=shuffle_group_first,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # Global spatial pooling (expects 7x7 feature maps for 224x224 input).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_shufflenetv2b(width_scale,
                      shuffle_group_first=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".tensorflow", "models"),
                      **kwargs):
    """
    Create ShuffleNetV2(b) model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]

    channels = [[width] * depth for width, depth in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width; widen the final block only for large scales.
        channels = [[int(width * width_scale) for width in stage] for stage in channels]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2b(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        shuffle_group_first=shuffle_group_first,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def shufflenetv2b_wd2(**kwargs):
    """
    ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    width_scale = 12.0 / 29.0
    return get_shufflenetv2b(
        width_scale=width_scale,
        shuffle_group_first=True,
        model_name="shufflenetv2b_wd2",
        **kwargs)
def shufflenetv2b_w1(**kwargs):
    """
    ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    width_scale = 1.0
    return get_shufflenetv2b(
        width_scale=width_scale,
        shuffle_group_first=True,
        model_name="shufflenetv2b_w1",
        **kwargs)
def shufflenetv2b_w3d2(**kwargs):
    """
    ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    width_scale = 44.0 / 29.0
    return get_shufflenetv2b(
        width_scale=width_scale,
        shuffle_group_first=True,
        model_name="shufflenetv2b_w3d2",
        **kwargs)
def shufflenetv2b_w2(**kwargs):
    """
    ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    width_scale = 61.0 / 29.0
    return get_shufflenetv2b(
        width_scale=width_scale,
        shuffle_group_first=True,
        model_name="shufflenetv2b_w2",
        **kwargs)
def _test():
    """Build each ShuffleNetV2(b) variant and verify output shape and weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    expected_weights = {
        shufflenetv2b_wd2: 1366792,
        shufflenetv2b_w1: 2279760,
        shufflenetv2b_w3d2: 4410194,
        shufflenetv2b_w2: 7611290,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
| 14,161 | 32.559242 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/menet.py | """
MENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle, Conv2d, BatchNorm, AvgPool2d,\
MaxPool2d, SimpleSequential, get_channel_axis, flatten
class MEUnit(nn.Layer):
    """
    MENet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    side_channels : int
        Number of side channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 side_channels,
                 groups,
                 downsample,
                 ignore_group,
                 data_format="channels_last",
                 **kwargs):
        super(MEUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.downsample = downsample
        # Bottleneck width: 1/4 of the output channels.
        mid_channels = out_channels // 4
        if downsample:
            # Downsample units concatenate the identity, so the conv branch
            # only needs to produce the remaining channels.
            out_channels -= in_channels
        # residual branch
        self.compress_conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=(1 if ignore_group else groups),
            data_format=data_format,
            name="compress_conv1")
        self.compress_bn1 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="compress_bn1")
        self.c_shuffle = ChannelShuffle(
            channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="c_shuffle")
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            strides=(2 if self.downsample else 1),
            data_format=data_format,
            name="dw_conv2")
        self.dw_bn2 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="dw_bn2")
        self.expand_conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            groups=groups,
            data_format=data_format,
            name="expand_conv3")
        self.expand_bn3 = BatchNorm(
            # in_channels=out_channels,
            data_format=data_format,
            name="expand_bn3")
        if downsample:
            self.avgpool = AvgPool2d(
                pool_size=3,
                strides=2,
                padding=1,
                data_format=data_format,
                name="avgpool")
        self.activ = nn.ReLU()
        # fusion branch
        self.s_merge_conv = conv1x1(
            in_channels=mid_channels,
            out_channels=side_channels,
            data_format=data_format,
            name="s_merge_conv")
        self.s_merge_bn = BatchNorm(
            # in_channels=side_channels,
            data_format=data_format,
            name="s_merge_bn")
        self.s_conv = conv3x3(
            in_channels=side_channels,
            out_channels=side_channels,
            strides=(2 if self.downsample else 1),
            data_format=data_format,
            name="s_conv")
        self.s_conv_bn = BatchNorm(
            # in_channels=side_channels,
            data_format=data_format,
            name="s_conv_bn")
        self.s_evolve_conv = conv1x1(
            in_channels=side_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="s_evolve_conv")
        self.s_evolve_bn = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="s_evolve_bn")
    def call(self, x, training=None):
        identity = x
        # pointwise group convolution 1
        x = self.compress_conv1(x)
        x = self.compress_bn1(x, training=training)
        x = self.activ(x)
        x = self.c_shuffle(x)
        # merging
        y = self.s_merge_conv(x)
        y = self.s_merge_bn(y, training=training)
        y = self.activ(y)
        # depthwise convolution (bottleneck)
        x = self.dw_conv2(x)
        x = self.dw_bn2(x, training=training)
        # evolution
        y = self.s_conv(y)
        y = self.s_conv_bn(y, training=training)
        y = self.activ(y)
        y = self.s_evolve_conv(y)
        y = self.s_evolve_bn(y, training=training)
        # Sigmoid gate: the narrow fusion branch modulates the main branch.
        y = tf.nn.sigmoid(y)
        x = x * y
        # pointwise group convolution 2
        x = self.expand_conv3(x)
        x = self.expand_bn3(x, training=training)
        # identity branch
        if self.downsample:
            # Average-pooled identity is concatenated (channels add up).
            identity = self.avgpool(identity)
            x = tf.concat([x, identity], axis=get_channel_axis(self.data_format))
        else:
            x = x + identity
        x = self.activ(x)
        return x
class MEInitBlock(nn.Layer):
    """
    MENet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(MEInitBlock, self).__init__(**kwargs)
        # Stride-2 conv + BN + ReLU, then stride-2 max-pool -> 4x reduction.
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            strides=2,
            padding=1,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(
            # in_channels=out_channels,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        x = self.conv(x)
        x = self.bn(x, training=training)
        x = self.activ(x)
        x = self.pool(x)
        return x
class MENet(tf.keras.Model):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MENet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(MEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of every stage downsamples; the very first unit of
                # the network also ignores grouping in its compress conv.
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                stage.add(MEUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    side_channels=side_channels,
                    groups=groups,
                    downsample=downsample,
                    ignore_group=ignore_group,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Global spatial pooling (expects 7x7 feature maps for 224x224 input).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create MENet model with specific parameters.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    layers = [4, 8, 4]

    # (init_block_channels, channels_per_layers) keyed by first-stage width.
    configs = {
        108: (12, [108, 216, 432]),
        128: (12, [128, 256, 512]),
        160: (16, [160, 320, 640]),
        228: (24, [228, 456, 912]),
        256: (24, [256, 512, 1024]),
        348: (24, [348, 696, 1392]),
        352: (24, [352, 704, 1408]),
        456: (48, [456, 912, 1824]),
    }
    if first_stage_channels not in configs:
        raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
    init_block_channels, channels_per_layers = configs[first_stage_channels]

    channels = [[width] * depth for width, depth in zip(channels_per_layers, layers)]

    net = MENet(
        channels=channels,
        init_block_channels=init_block_channels,
        side_channels=side_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def menet108_8x1_g3(**kwargs):
    """
    108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(108, 8, 3, model_name="menet108_8x1_g3", **kwargs)


def menet128_8x1_g4(**kwargs):
    """
    128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(128, 8, 4, model_name="menet128_8x1_g4", **kwargs)


def menet160_8x1_g8(**kwargs):
    """
    160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(160, 8, 8, model_name="menet160_8x1_g8", **kwargs)


def menet228_12x1_g3(**kwargs):
    """
    228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(228, 12, 3, model_name="menet228_12x1_g3", **kwargs)


def menet256_12x1_g4(**kwargs):
    """
    256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(256, 12, 4, model_name="menet256_12x1_g4", **kwargs)


def menet348_12x1_g3(**kwargs):
    """
    348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(348, 12, 3, model_name="menet348_12x1_g3", **kwargs)


def menet352_12x1_g8(**kwargs):
    """
    352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(352, 12, 8, model_name="menet352_12x1_g8", **kwargs)


def menet456_24x1_g3(**kwargs):
    """
    456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_menet(456, 24, 3, model_name="menet456_24x1_g3", **kwargs)
def _test():
    """Smoke-test every MENet variant: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Expected trainable-parameter counts per variant (dict preserves insertion order).
    expected_weight_counts = {
        menet108_8x1_g3: 654516,
        menet128_8x1_g4: 750796,
        menet160_8x1_g8: 850120,
        menet228_12x1_g3: 1806568,
        menet256_12x1_g4: 1888240,
        menet348_12x1_g3: 3368128,
        menet352_12x1_g8: 2272872,
        menet456_24x1_g3: 5304784,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 18,147 | 33.112782 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/voca.py | """
VOCA for speech-driven facial animation, implemented in TensorFlow.
Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.
"""
__all__ = ['VOCA', 'voca8flame']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import BatchNorm, ConvBlock, SimpleSequential, flatten, get_channel_axis, is_channels_first
class VocaEncoder(nn.Layer):
    """
    VOCA encoder.

    Maps a window of time/feature audio input plus a one-hot speaker identity
    to a low-dimensional code of `encoder_features` values.

    Parameters:
    ----------
    audio_features : int
        Number of audio features (characters/sounds).
    audio_window_size : int
        Size of audio window (for time related audio features).
    base_persons : int
        Number of base persons (subjects).
    encoder_features : int
        Number of encoder features.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 audio_features,
                 audio_window_size,
                 base_persons,
                 encoder_features,
                 data_format="channels_last",
                 **kwargs):
        super(VocaEncoder, self).__init__(**kwargs)
        self.audio_window_size = audio_window_size
        self.data_format = data_format
        channels = (32, 32, 64, 64)
        fc1_channels = 128
        # Batch-normalize the raw audio features before anything else.
        self.bn = BatchNorm(
            epsilon=1e-5,
            data_format=data_format,
            name="bn")
        # The conv branch receives audio features concatenated with the
        # one-hot speaker id, hence the widened input channel count.
        in_channels = audio_features + base_persons
        self.branch = SimpleSequential(name="branch")
        for i, out_channels in enumerate(channels):
            # (3, 1) kernels with (2, 1) strides: convolve and downsample
            # along the time axis only.
            self.branch.add(ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 1),
                strides=(2, 1),
                padding=(1, 0),
                use_bias=True,
                use_bn=False,
                data_format=data_format,
                name="conv{}".format(i + 1)))
            in_channels = out_channels
        # The speaker id is concatenated a second time before the FC head.
        in_channels += base_persons
        self.fc1 = nn.Dense(
            units=fc1_channels,
            input_dim=in_channels,
            name="fc1")
        self.fc2 = nn.Dense(
            units=encoder_features,
            input_dim=fc1_channels,
            name="fc2")
    def call(self, x, pid, training=None):
        x = self.bn(x, training=training)
        # Move the audio-feature axis into the channel position, then tile the
        # one-hot speaker id `pid` along the time (audio window) axis so it
        # can be concatenated with the features at every time step.
        if is_channels_first(self.data_format):
            x = tf.transpose(x, perm=(0, 3, 2, 1))
            y = tf.expand_dims(tf.expand_dims(pid, -1), -1)
            y = tf.tile(y, multiples=(1, 1, self.audio_window_size, 1))
        else:
            x = tf.transpose(x, perm=(0, 1, 3, 2))
            y = tf.expand_dims(tf.expand_dims(pid, 1), 1)
            y = tf.tile(y, multiples=(1, self.audio_window_size, 1, 1))
        x = tf.concat([x, y], axis=get_channel_axis(self.data_format))
        x = self.branch(x)
        x = flatten(x, self.data_format)
        # Re-append the speaker id to the flattened conv features.
        x = tf.concat([x, pid], axis=1)
        x = self.fc1(x)
        x = tf.math.tanh(x)
        x = self.fc2(x)
        return x
class VOCA(tf.keras.Model):
    """
    VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.

    Maps a window of audio features and an integer speaker index to 3D facial
    mesh vertex positions.

    Parameters:
    ----------
    audio_features : int, default 29
        Number of audio features (characters/sounds).
    audio_window_size : int, default 16
        Size of audio window (for time related audio features).
    base_persons : int, default 8
        Number of base persons (subjects).
    encoder_features : int, default 50
        Number of encoder features.
    vertices : int, default 5023
        Number of 3D geometry vertices.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 audio_features=29,
                 audio_window_size=16,
                 base_persons=8,
                 encoder_features=50,
                 vertices=5023,
                 data_format="channels_last",
                 **kwargs):
        super(VOCA, self).__init__(**kwargs)
        self.base_persons = base_persons
        self.data_format = data_format
        self.encoder = VocaEncoder(
            audio_features=audio_features,
            audio_window_size=audio_window_size,
            base_persons=base_persons,
            encoder_features=encoder_features,
            data_format=data_format,
            name="encoder")
        # Linear decoder producing an x/y/z triple for every mesh vertex.
        self.decoder = nn.Dense(
            units=(3 * vertices),
            input_dim=encoder_features,
            name="decoder")
    def call(self, x, pid, training=None):
        # `pid` is an integer speaker index; encode it as a one-hot vector.
        pid = tf.one_hot(pid, depth=self.base_persons)
        x = self.encoder(x, pid, training=training)
        x = self.decoder(x)
        # Reshape the flat prediction to (batch, 1, vertices, 3) for
        # channels_first layout or (batch, vertices, 3, 1) for channels_last.
        x = tf.reshape(x, shape=(x.get_shape().as_list()[0], 1, -1, 3)) if is_channels_first(self.data_format) else\
            tf.reshape(x, shape=(x.get_shape().as_list()[0], -1, 3, 1))
        return x
def get_voca(base_persons,
             vertices,
             model_name=None,
             pretrained=False,
             root=os.path.join("~", ".tensorflow", "models"),
             **kwargs):
    """
    Create VOCA model with specific parameters.

    Parameters:
    ----------
    base_persons : int
        Number of base persons (subjects).
    vertices : int
        Number of 3D geometry vertices.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = VOCA(
        base_persons=base_persons,
        vertices=vertices,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # NOTE(review): the branch below is the image-model boilerplate shared
        # across this repo; `VOCA.__init__` does not set `in_size`, and its
        # `call` takes (x, pid) rather than a single image tensor, so
        # `net.in_size`/`net.build` look suspect here — verify before relying
        # on `pretrained=True`.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def voca8flame(**kwargs):
    """
    VOCA-8-FLAME model for 8 base persons and FLAME topology from 'Capture, Learning, and Synthesis of 3D Speaking
    Styles,' https://arxiv.org/abs/1905.03079. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_voca(8, 5023, model_name="voca8flame", **kwargs)
def _test():
    """Smoke-test VOCA: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    # data_format = "channels_first"
    data_format = "channels_last"
    pretrained = False

    for model, expected in ((voca8flame, 809563),):
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        audio_features = 29
        audio_window_size = 16
        vertices = 5023
        channels_first = is_channels_first(data_format)

        shape = (batch, 1, audio_window_size, audio_features) if channels_first else \
            (batch, audio_window_size, audio_features, 1)
        x = tf.random.normal(shape)
        pid = tf.fill(dims=(batch,), value=3)
        y = net(x, pid)

        expected_y_shape = (batch, 1, vertices, 3) if channels_first else (batch, vertices, 3, 1)
        assert (y.shape == expected_y_shape)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 8,094 | 32.589212 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/wrn_cifar.py | """
WRN for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10',
'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3, SimpleSequential, flatten, is_channels_first
from .preresnet import PreResUnit, PreResActivation
class CIFARWRN(tf.keras.Model):
    """
    WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    A pre-activation ResNet built from `PreResUnit`s with widened stage
    channel counts.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARWRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=False,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Pre-activation networks need a final BN + activation after the last unit.
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_wrn_cifar(classes,
                  blocks,
                  width_factor,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create WRN model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # A WRN of depth d has d = 6 * n + 4, i.e. n units per each of 3 stages.
    assert ((blocks - 4) % 6 == 0)
    units_per_stage = (blocks - 4) // 6
    layers = [units_per_stage] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16

    # Widen every stage by `width_factor`.
    channels = [[width_factor * ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = CIFARWRN(
        channels=channels,
        init_block_channels=init_block_channels,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch so variables exist before loading.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def wrn16_10_cifar10(classes=10, **kwargs):
    """
    WRN-16-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=16, width_factor=10, classes=classes, model_name="wrn16_10_cifar10", **kwargs)


def wrn16_10_cifar100(classes=100, **kwargs):
    """
    WRN-16-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=16, width_factor=10, classes=classes, model_name="wrn16_10_cifar100", **kwargs)


def wrn16_10_svhn(classes=10, **kwargs):
    """
    WRN-16-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=16, width_factor=10, classes=classes, model_name="wrn16_10_svhn", **kwargs)


def wrn28_10_cifar10(classes=10, **kwargs):
    """
    WRN-28-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=28, width_factor=10, classes=classes, model_name="wrn28_10_cifar10", **kwargs)


def wrn28_10_cifar100(classes=100, **kwargs):
    """
    WRN-28-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=28, width_factor=10, classes=classes, model_name="wrn28_10_cifar100", **kwargs)


def wrn28_10_svhn(classes=10, **kwargs):
    """
    WRN-28-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=28, width_factor=10, classes=classes, model_name="wrn28_10_svhn", **kwargs)


def wrn40_8_cifar10(classes=10, **kwargs):
    """
    WRN-40-8 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=40, width_factor=8, classes=classes, model_name="wrn40_8_cifar10", **kwargs)


def wrn40_8_cifar100(classes=100, **kwargs):
    """
    WRN-40-8 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=40, width_factor=8, classes=classes, model_name="wrn40_8_cifar100", **kwargs)


def wrn40_8_svhn(classes=10, **kwargs):
    """
    WRN-40-8 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
    Accepts `pretrained` and `root` keyword arguments.
    """
    return get_wrn_cifar(blocks=40, width_factor=8, classes=classes, model_name="wrn40_8_svhn", **kwargs)
def _test():
    """Smoke-test every WRN variant: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # (constructor, class count, expected trainable-parameter count)
    cases = (
        (wrn16_10_cifar10, 10, 17116634),
        (wrn16_10_cifar100, 100, 17174324),
        (wrn16_10_svhn, 10, 17116634),
        (wrn28_10_cifar10, 10, 36479194),
        (wrn28_10_cifar100, 100, 36536884),
        (wrn28_10_svhn, 10, 36479194),
        (wrn40_8_cifar10, 10, 35748314),
        (wrn40_8_cifar100, 100, 35794484),
        (wrn40_8_svhn, 10, 35748314),
    )
    for model, classes, expected in cases:
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, classes))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 11,768 | 34.342342 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/inceptionresnetv2.py | """
InceptionResNetV2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV2', 'inceptionresnetv2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, conv1x1_block, conv3x3_block, SimpleSequential, Concurrent, flatten, is_channels_first
from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit
class InceptBlock5b(nn.Layer):
    """
    InceptionResNetV2 type Mixed-5b block.

    Four parallel branches over a 192-channel input — 1x1, 1x1->5x5,
    1x1->3x3->3x3, and avg-pool->1x1 — concatenated to 96 + 64 + 96 + 64 = 320
    output channels.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(InceptBlock5b, self).__init__(**kwargs)
        in_channels = 192
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.children.append(Conv1x1Branch(
            in_channels=in_channels,
            out_channels=96,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch1"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(48, 64),
            kernel_size_list=(1, 5),
            strides_list=(1, 1),
            padding_list=(0, 2),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch2"))
        self.branches.children.append(ConvSeqBranch(
            in_channels=in_channels,
            out_channels_list=(64, 96, 96),
            kernel_size_list=(1, 3, 3),
            strides_list=(1, 1, 1),
            padding_list=(0, 1, 1),
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch3"))
        self.branches.children.append(AvgPoolBranch(
            in_channels=in_channels,
            out_channels=64,
            bn_eps=bn_eps,
            data_format=data_format,
            name="branch4"))
    def call(self, x, training=None):
        x = self.branches(x, training=training)
        return x
class InceptInitBlock(nn.Layer):
    """
    InceptionResNetV2 specific initial block (stem).

    Five conv blocks and two 3x3/stride-2 max-pools followed by the Mixed-5b
    block; for a 299x299 input this yields a 35x35 map with 320 channels.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 bn_eps,
                 in_channels,
                 data_format="channels_last",
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            strides=2,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=32,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            strides=1,
            padding=1,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv3")
        self.pool1 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool1")
        self.conv4 = conv1x1_block(
            in_channels=64,
            out_channels=80,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv4")
        self.conv5 = conv3x3_block(
            in_channels=80,
            out_channels=192,
            strides=1,
            padding=0,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv5")
        self.pool2 = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=0,
            data_format=data_format,
            name="pool2")
        # Mixed-5b: widens the 192-channel stem output to 320 channels.
        self.block = InceptBlock5b(
            bn_eps=bn_eps,
            data_format=data_format,
            name="block")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.pool1(x)
        x = self.conv4(x, training=training)
        x = self.conv5(x, training=training)
        x = self.pool2(x)
        x = self.block(x, training=training)
        return x
class InceptionResNetV2(tf.keras.Model):
    """
    InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionResNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Units per stage; the first unit of stages 2 and 3 is a reduction unit.
        layers = [10, 21, 11]
        in_channels_list = [320, 1088, 2080]
        normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]]
        reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        self.features = SimpleSequential(name="features")
        self.features.add(InceptInitBlock(
            in_channels=in_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        in_channels = in_channels_list[0]
        for i, layers_per_stage in enumerate(layers):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j in range(layers_per_stage):
                if (j == 0) and (i != 0):
                    # Spatial reduction unit at the start of stages 2 and 3.
                    unit = reduction_units[i - 1]
                    out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                else:
                    unit = normal_units[i]
                    out_channels_list_per_stage = normal_out_channels_list[i]
                if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                    # The very last residual unit adds its branch unscaled and
                    # without the trailing activation.
                    unit_kwargs = {"scale": 1.0, "activate": False}
                else:
                    unit_kwargs = {}
                stage.add(unit(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list_per_stage,
                    bn_eps=bn_eps,
                    data_format=data_format,
                    name="unit{}".format(j + 1),
                    **unit_kwargs))
                if (j == 0) and (i != 0):
                    in_channels = in_channels_list[i]
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=2080,
            out_channels=1536,
            bn_eps=bn_eps,
            data_format=data_format,
            name="final_block"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            # Dropout only materializes when a positive rate is requested.
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="output1/dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=1536,
            name="output1/fc"))
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_inceptionresnetv2(model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".tensorflow", "models"),
                          **kwargs):
    """
    Create InceptionResNetV2 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV2(**kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch dimension so the variables exist
        # before the checkpoint is loaded.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def inceptionresnetv2(**kwargs):
    """
    InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261. Accepts `pretrained` and `root` keyword arguments.
    """
    return get_inceptionresnetv2(bn_eps=1e-3, model_name="inceptionresnetv2", **kwargs)
def _test():
    """Smoke-test InceptionResNetV2: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model, expected in ((inceptionresnetv2, 55843464),):
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 299, 299) if is_channels_first(data_format) else (batch, 299, 299, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 11,470 | 33.038576 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ghostnet.py | """
GhostNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
"""
__all__ = ['GhostNet', 'ghostnet']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
dwsconv3x3_block, SEBlock, SimpleSequential, get_channel_axis, flatten, is_channels_first
class GhostHSigmoid(nn.Layer):
    """
    Piecewise-linear ("hard") approximation of the sigmoid, specific for
    GhostNet: clamps its input into the range [0, 1].
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, x, training=None):
        # Values below 0 map to 0, values above 1 map to 1, the rest pass through.
        return tf.clip_by_value(x, 0.0, 1.0)
class GhostConvBlock(nn.Layer):
    """
    GhostNet specific convolution block.

    Half (rounded up) of the output channels come from an ordinary 1x1
    convolution; the remaining "ghost" channels are generated cheaply from
    those with a depthwise 3x3 convolution, and both halves are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(GhostConvBlock, self).__init__(**kwargs)
        self.data_format = data_format
        main_out_channels = math.ceil(0.5 * out_channels)
        cheap_out_channels = out_channels - main_out_channels
        self.main_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=main_out_channels,
            activation=activation,
            data_format=data_format,
            name="main_conv")
        # Depthwise conv over the primary features produces the "ghost" half.
        self.cheap_conv = dwconv3x3_block(
            in_channels=main_out_channels,
            out_channels=cheap_out_channels,
            activation=activation,
            data_format=data_format,
            name="cheap_conv")
    def call(self, x, training=None):
        x = self.main_conv(x, training=training)
        y = self.cheap_conv(x, training=training)
        # Concatenate primary features with their cheap ghost counterparts.
        return tf.concat([x, y], axis=get_channel_axis(self.data_format))
class GhostExpBlock(nn.Layer):
    """
    GhostNet expansion block for residual path in GhostNet unit.

    Structure: ghost conv (expand) -> optional depthwise conv (downsample) ->
    optional SE block -> ghost conv (project, no activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_kernel3,
                 exp_factor,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(GhostExpBlock, self).__init__(**kwargs)
        # The depthwise convolution is only needed when downsampling.
        self.use_dw_conv = (strides != 1)
        self.use_se = use_se
        mid_channels = int(math.ceil(exp_factor * in_channels))
        self.exp_conv = GhostConvBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            # Bug fix: data_format was not forwarded here, so with
            # 'channels_first' the expansion ghost-block silently built its
            # depthwise conv and channel concatenation for 'channels_last'.
            data_format=data_format,
            name="exp_conv")
        if self.use_dw_conv:
            dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block
            self.dw_conv = dw_conv_class(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="dw_conv")
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=4,
                out_activation=GhostHSigmoid(),
                data_format=data_format,
                name="se")
        self.pw_conv = GhostConvBlock(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="pw_conv")

    def call(self, x, training=None):
        x = self.exp_conv(x, training=training)
        if self.use_dw_conv:
            x = self.dw_conv(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x, training=training)
        return x
class GhostUnit(nn.Layer):
    """
    GhostNet unit: a GhostExpBlock body combined with an identity shortcut,
    which is projected by a depthwise-separable conv whenever the input and
    output shapes differ.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_kernel3,
                 exp_factor,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(GhostUnit, self).__init__(**kwargs)
        # The shortcut needs a projection when spatial size or channel count changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = GhostExpBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            use_kernel3=use_kernel3,
            exp_factor=exp_factor,
            use_se=use_se,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = dwsconv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                pw_activation=None,
                data_format=data_format,
                name="identity_conv")

    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        return self.body(x, training=training) + identity
class GhostClassifier(nn.Layer):
    """
    GhostNet classifier head: a 1x1 conv block followed by a biased 1x1 conv
    that produces the class logits.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 data_format="channels_last",
                 **kwargs):
        super(GhostClassifier, self).__init__(**kwargs)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        hidden = self.conv1(x, training=training)
        return self.conv2(hidden)
class GhostNet(tf.keras.Model):
    """
    GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 exp_factors,
                 use_se,
                 first_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(GhostNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Feature extractor: stem conv -> stages of GhostUnits -> 1x1 conv -> 7x7 avg pool.
        # NOTE(review): the `name=` strings and construction order determine the layout of
        # pretrained weight files loaded in get_ghostnet(); do not rename or reorder.
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of a stage (stage 1 only if first_stride).
                strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                use_se_flag = use_se[i][j] == 1
                stage.add(GhostUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    use_se=use_se_flag,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # Fixed 7x7 pooling window — assumes a 224x224 input (7x7 final feature map);
        # TODO confirm behavior for other in_size values.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Conv-based classifier head. Named `output1` — presumably because `output`
        # is a reserved attribute on tf.keras.Model.
        self.output1 = GhostClassifier(
            in_channels=in_channels,
            out_channels=classes,
            mid_channels=classifier_mid_channels,
            data_format=data_format,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = self.output1(x, training=training)
        # Collapse the 1x1 spatial map into (batch, classes) logits.
        x = flatten(x, self.data_format)
        return x
def get_ghostnet(width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create GhostNet model with specific parameters.

    Parameters:
    ----------
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Base (width_scale == 1.0) architecture configuration, per stage and unit.
    init_block_channels = 16
    channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]]
    kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
    exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]]
    use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]]
    final_block_channels = 960
    classifier_mid_channels = 1280
    first_stride = False

    if width_scale != 1.0:
        # Rescale every channel count, rounding to a multiple of 4.
        channels = [[round_channels(cpu * width_scale, divisor=4) for cpu in stage] for stage in channels]
        init_block_channels = round_channels(init_block_channels * width_scale, divisor=4)
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale, divisor=4)

    net = GhostNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        use_se=use_se,
        first_stride=first_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the graph once so that weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def ghostnet(**kwargs):
    """
    GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Baseline width (width_scale = 1.0) configuration.
    net = get_ghostnet(model_name="ghostnet", **kwargs)
    return net
def _test():
    """Smoke-test GhostNet: forward-pass shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model in [ghostnet]:
        net = model(pretrained=pretrained, data_format=data_format)

        # Forward a random batch and verify the logits shape.
        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)

        # Compare the trainable-parameter count against the reference value.
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != ghostnet or weight_count == 5180840)
| 15,092 | 32.614699 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/efficientnet.py | """
EfficientNet for ImageNet-1K, implemented in TensorFlow.
Original papers:
- 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946,
- 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665.
"""
__all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6',
'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b',
'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b',
'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c',
'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\
SimpleSequential, is_channels_first
def calc_tf_padding(x,
                    kernel_size,
                    strides=1,
                    dilation=1,
                    data_format="channels_last"):
    """
    Calculate TF-same like padding size.

    Parameters:
    ----------
    x : tensor
        Input tensor.
    kernel_size : int
        Convolution window size.
    strides : int, default 1
        Strides of the convolution.
    dilation : int, default 1
        Dilation value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    tuple of 4 int
        The size of the padding.
    """
    # Bug fix: the spatial dimensions must be selected according to the tensor
    # layout. The previous code always read x.shape[2:], which for a
    # channels_last tensor (N, H, W, C) yields (W, C) instead of (H, W).
    if is_channels_first(data_format):
        height, width = x.shape[2:4]
    else:
        height, width = x.shape[1:3]
    oh = math.ceil(height / strides)
    ow = math.ceil(width / strides)
    # Total padding needed so that the strided, dilated convolution covers the
    # whole input ("SAME" semantics); split as evenly as possible, extra pixel
    # on the bottom/right.
    pad_h = max((oh - 1) * strides + (kernel_size - 1) * dilation + 1 - height, 0)
    pad_w = max((ow - 1) * strides + (kernel_size - 1) * dilation + 1 - width, 0)
    if is_channels_first(data_format):
        paddings_tf = [[0, 0], [0, 0], [pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w // 2]]
    else:
        paddings_tf = [[0, 0], [pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w // 2], [0, 0]]
    return paddings_tf
class EffiDwsConvUnit(nn.Layer):
    """
    EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each
    convolution layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_eps,
                 activation,
                 tf_mode,
                 data_format="channels_last",
                 **kwargs):
        super(EffiDwsConvUnit, self).__init__(**kwargs)
        self.tf_mode = tf_mode
        self.data_format = data_format
        # The residual connection only exists when shapes match.
        self.residual = (strides == 1) and (in_channels == out_channels)
        self.dw_conv = dwconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            padding=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="dw_conv")
        self.se = SEBlock(
            channels=in_channels,
            reduction=4,
            mid_activation=activation,
            data_format=data_format,
            name="se")
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="pw_conv")

    def call(self, x, training=None):
        identity = x if self.residual else None
        if self.tf_mode:
            # Emulate TF "SAME" padding explicitly before the valid-padded conv.
            x = tf.pad(x, paddings=calc_tf_padding(x, kernel_size=3, data_format=self.data_format))
        x = self.dw_conv(x, training=training)
        x = self.se(x)
        x = self.pw_conv(x, training=training)
        if self.residual:
            x = x + identity
        return x
class EffiInvResUnit(nn.Layer):
    """
    EfficientNet inverted residual unit (MBConv): 1x1 expansion, depthwise
    conv, optional SE block, 1x1 linear projection, residual shortcut when
    shapes allow.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 exp_factor,
                 se_factor,
                 bn_eps,
                 activation,
                 tf_mode,
                 data_format="channels_last",
                 **kwargs):
        super(EffiInvResUnit, self).__init__(**kwargs)
        self.kernel_size = kernel_size
        self.strides = strides
        self.tf_mode = tf_mode
        self.data_format = data_format
        self.residual = (strides == 1) and (in_channels == out_channels)
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor
        # Only 3x3 and 5x5 depthwise kernels are supported.
        dwconv_block_fn = {3: dwconv3x3_block, 5: dwconv5x5_block}.get(kernel_size)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv1")
        self.conv2 = dwconv_block_fn(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            padding=(0 if tf_mode else (kernel_size // 2)),
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv2")
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                mid_activation=activation,
                data_format=data_format,
                name="se")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        identity = x if self.residual else None
        x = self.conv1(x, training=training)
        if self.tf_mode:
            # Emulate TF "SAME" padding explicitly before the valid-padded conv.
            x = tf.pad(x, paddings=calc_tf_padding(x, kernel_size=self.kernel_size, strides=self.strides,
                                                   data_format=self.data_format))
        x = self.conv2(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x, training=training)
        if self.residual:
            x = x + identity
        return x
class EffiInitBlock(nn.Layer):
    """
    EfficientNet specific initial (stem) block: a strided 3x3 conv, optionally
    preceded by explicit TF-style "SAME" padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 activation,
                 tf_mode,
                 data_format="channels_last",
                 **kwargs):
        super(EffiInitBlock, self).__init__(**kwargs)
        self.tf_mode = tf_mode
        self.data_format = data_format
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            padding=(0 if tf_mode else 1),
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        if self.tf_mode:
            x = tf.pad(x, paddings=calc_tf_padding(x, kernel_size=3, strides=2, data_format=self.data_format))
        return self.conv(x, training=training)
class EfficientNet(tf.keras.Model):
    """
    EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(EfficientNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        activation = "swish"
        # Feature extractor: stem -> stages of units -> 1x1 conv -> global average pool.
        # NOTE(review): the `name=` strings and construction order determine the layout of
        # pretrained weight files loaded in get_efficientnet(); do not rename or reorder.
        self.features = SimpleSequential(name="features")
        self.features.add(EffiInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            tf_mode=tf_mode,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            kernel_sizes_per_stage = kernel_sizes[i]
            expansion_factors_per_stage = expansion_factors[i]
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                kernel_size = kernel_sizes_per_stage[j]
                expansion_factor = expansion_factors_per_stage[j]
                # Only the first unit of a stage may downsample.
                strides = strides_per_stage[i] if (j == 0) else 1
                if i == 0:
                    # The first stage uses the lighter depthwise-separable unit.
                    stage.add(EffiDwsConvUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    # Remaining stages use MBConv (inverted residual) units.
                    stage.add(EffiInvResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        strides=strides,
                        exp_factor=expansion_factor,
                        se_factor=4,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))
        # Classifier head: optional dropout + fully-connected layer. Named `output1` —
        # presumably because `output` is a reserved attribute on tf.keras.Model.
        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = self.output1(x)
        return x
def get_efficientnet(version,
in_size,
tf_mode=False,
bn_eps=1e-5,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create EfficientNet model with specific parameters.
Parameters:
----------
version : str
Version of EfficientNet ('b0'...'b7').
in_size : tuple of two ints
Spatial size of the expected input image.
tf_mode : bool, default False
Whether to use TF-like mode.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
if version == "b0":
assert (in_size == (224, 224))
depth_factor = 1.0
width_factor = 1.0
dropout_rate = 0.2
elif version == "b1":
assert (in_size == (240, 240))
depth_factor = 1.1
width_factor = 1.0
dropout_rate = 0.2
elif version == "b2":
assert (in_size == (260, 260))
depth_factor = 1.2
width_factor = 1.1
dropout_rate = 0.3
elif version == "b3":
assert (in_size == (300, 300))
depth_factor = 1.4
width_factor = 1.2
dropout_rate = 0.3
elif version == "b4":
assert (in_size == (380, 380))
depth_factor = 1.8
width_factor = 1.4
dropout_rate = 0.4
elif version == "b5":
assert (in_size == (456, 456))
depth_factor = 2.2
width_factor = 1.6
dropout_rate = 0.4
elif version == "b6":
assert (in_size == (528, 528))
depth_factor = 2.6
width_factor = 1.8
dropout_rate = 0.5
elif version == "b7":
assert (in_size == (600, 600))
depth_factor = 3.1
width_factor = 2.0
dropout_rate = 0.5
elif version == "b8":
assert (in_size == (672, 672))
depth_factor = 3.6
width_factor = 2.2
dropout_rate = 0.5
else:
raise ValueError("Unsupported EfficientNet version {}".format(version))
init_block_channels = 32
layers = [1, 2, 2, 3, 3, 4, 1]
downsample = [1, 1, 1, 1, 0, 1, 0]
channels_per_layers = [16, 24, 40, 80, 112, 192, 320]
expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6]
kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3]
strides_per_stage = [1, 2, 2, 2, 1, 2, 1]
final_block_channels = 1280
layers = [int(math.ceil(li * depth_factor)) for li in layers]
channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]
from functools import reduce
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [])
kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(kernel_sizes_per_layers, layers, downsample), [])
expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(expansion_factors_per_layers, layers, downsample), [])
strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(strides_per_stage, layers, downsample), [])
strides_per_stage = [si[0] for si in strides_per_stage]
init_block_channels = round_channels(init_block_channels * width_factor)
if width_factor > 1.0:
assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
final_block_channels = round_channels(final_block_channels * width_factor)
net = EfficientNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernel_sizes=kernel_sizes,
strides_per_stage=strides_per_stage,
expansion_factors=expansion_factors,
dropout_rate=dropout_rate,
tf_mode=tf_mode,
bn_eps=bn_eps,
in_size=in_size,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def efficientnet_b0(in_size=(224, 224), **kwargs):
    """
    Build EfficientNet-B0 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b0",
        in_size=in_size,
        model_name="efficientnet_b0",
        **kwargs)
def efficientnet_b1(in_size=(240, 240), **kwargs):
    """
    Build EfficientNet-B1 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b1",
        in_size=in_size,
        model_name="efficientnet_b1",
        **kwargs)
def efficientnet_b2(in_size=(260, 260), **kwargs):
    """
    Build EfficientNet-B2 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b2",
        in_size=in_size,
        model_name="efficientnet_b2",
        **kwargs)
def efficientnet_b3(in_size=(300, 300), **kwargs):
    """
    Build EfficientNet-B3 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b3",
        in_size=in_size,
        model_name="efficientnet_b3",
        **kwargs)
def efficientnet_b4(in_size=(380, 380), **kwargs):
    """
    Build EfficientNet-B4 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b4",
        in_size=in_size,
        model_name="efficientnet_b4",
        **kwargs)
def efficientnet_b5(in_size=(456, 456), **kwargs):
    """
    Build EfficientNet-B5 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b5",
        in_size=in_size,
        model_name="efficientnet_b5",
        **kwargs)
def efficientnet_b6(in_size=(528, 528), **kwargs):
    """
    Build EfficientNet-B6 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b6",
        in_size=in_size,
        model_name="efficientnet_b6",
        **kwargs)
def efficientnet_b7(in_size=(600, 600), **kwargs):
    """
    Build EfficientNet-B7 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b7",
        in_size=in_size,
        model_name="efficientnet_b7",
        **kwargs)
def efficientnet_b8(in_size=(672, 672), **kwargs):
    """
    Build EfficientNet-B8 ('EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (672, 672)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b8",
        in_size=in_size,
        model_name="efficientnet_b8",
        **kwargs)
def efficientnet_b0b(in_size=(224, 224), **kwargs):
    """
    Build EfficientNet-B0-b — the TF-reference-like variant (explicit SAME padding, bn_eps=1e-3) of
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b0",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b0b",
        **kwargs)
def efficientnet_b1b(in_size=(240, 240), **kwargs):
    """
    Build EfficientNet-B1-b — the TF-reference-like variant (explicit SAME padding, bn_eps=1e-3) of
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b1",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b1b",
        **kwargs)
def efficientnet_b2b(in_size=(260, 260), **kwargs):
    """
    Build EfficientNet-B2-b — the TF-reference-like variant (explicit SAME padding, bn_eps=1e-3) of
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b2",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b2b",
        **kwargs)
def efficientnet_b3b(in_size=(300, 300), **kwargs):
    """
    Construct EfficientNet-B3-b, the TF-reference flavour of EfficientNet-B3 (TF-style 'same' padding and
    BatchNorm epsilon 1e-3), from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b3",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b3b",
        **kwargs)
def efficientnet_b4b(in_size=(380, 380), **kwargs):
    """
    Construct EfficientNet-B4-b, the TF-reference flavour of EfficientNet-B4 (TF-style 'same' padding and
    BatchNorm epsilon 1e-3), from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b4",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b4b",
        **kwargs)
def efficientnet_b5b(in_size=(456, 456), **kwargs):
    """
    Construct EfficientNet-B5-b, the TF-reference flavour of EfficientNet-B5 (TF-style 'same' padding and
    BatchNorm epsilon 1e-3), from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b5",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b5b",
        **kwargs)
def efficientnet_b6b(in_size=(528, 528), **kwargs):
    """
    Construct EfficientNet-B6-b, the TF-reference flavour of EfficientNet-B6 (TF-style 'same' padding and
    BatchNorm epsilon 1e-3), from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b6",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b6b",
        **kwargs)
def efficientnet_b7b(in_size=(600, 600), **kwargs):
    """
    Construct EfficientNet-B7-b, the TF-reference flavour of EfficientNet-B7 (TF-style 'same' padding and
    BatchNorm epsilon 1e-3), from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b7",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b7b",
        **kwargs)
def efficientnet_b0c(in_size=(224, 224), **kwargs):
    """
    Construct EfficientNet-B0-c, the TF-reference flavour of EfficientNet-B0 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b0",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b0c",
        **kwargs)
def efficientnet_b1c(in_size=(240, 240), **kwargs):
    """
    Construct EfficientNet-B1-c, the TF-reference flavour of EfficientNet-B1 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b1",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b1c",
        **kwargs)
def efficientnet_b2c(in_size=(260, 260), **kwargs):
    """
    Construct EfficientNet-B2-c, the TF-reference flavour of EfficientNet-B2 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b2",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b2c",
        **kwargs)
def efficientnet_b3c(in_size=(300, 300), **kwargs):
    """
    Construct EfficientNet-B3-c, the TF-reference flavour of EfficientNet-B3 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b3",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b3c",
        **kwargs)
def efficientnet_b4c(in_size=(380, 380), **kwargs):
    """
    Construct EfficientNet-B4-c, the TF-reference flavour of EfficientNet-B4 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b4",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b4c",
        **kwargs)
def efficientnet_b5c(in_size=(456, 456), **kwargs):
    """
    Construct EfficientNet-B5-c, the TF-reference flavour of EfficientNet-B5 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b5",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b5c",
        **kwargs)
def efficientnet_b6c(in_size=(528, 528), **kwargs):
    """
    Construct EfficientNet-B6-c, the TF-reference flavour of EfficientNet-B6 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b6",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b6c",
        **kwargs)
def efficientnet_b7c(in_size=(600, 600), **kwargs):
    """
    Construct EfficientNet-B7-c, the TF-reference flavour of EfficientNet-B7 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b7",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b7c",
        **kwargs)
def efficientnet_b8c(in_size=(672, 672), **kwargs):
    """
    Construct EfficientNet-B8-c, the TF-reference flavour of EfficientNet-B8 trained with AdvProp, from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (672, 672)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet(
        version="b8",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_b8c",
        **kwargs)
def _test():
    """Smoke-test every EfficientNet variant: forward pass shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    models = [
        efficientnet_b0,
        efficientnet_b1,
        efficientnet_b2,
        efficientnet_b3,
        efficientnet_b4,
        efficientnet_b5,
        efficientnet_b6,
        efficientnet_b7,
        efficientnet_b8,
        efficientnet_b0b,
        efficientnet_b1b,
        efficientnet_b2b,
        efficientnet_b3b,
        efficientnet_b4b,
        efficientnet_b5b,
        efficientnet_b6b,
        efficientnet_b7b,
        efficientnet_b0c,
        efficientnet_b1c,
        efficientnet_b2c,
        efficientnet_b3c,
        efficientnet_b4c,
        efficientnet_b5c,
        efficientnet_b6c,
        efficientnet_b7c,
        efficientnet_b8c,
    ]
    # Known trainable-parameter counts; AdvProp (c) variants are not checked,
    # matching the original test which only verified the plain and -b models.
    expected_weight_counts = {
        efficientnet_b0: 5288548,
        efficientnet_b1: 7794184,
        efficientnet_b2: 9109994,
        efficientnet_b3: 12233232,
        efficientnet_b4: 19341616,
        efficientnet_b5: 30389784,
        efficientnet_b6: 43040704,
        efficientnet_b7: 66347960,
        efficientnet_b8: 87413142,
        efficientnet_b0b: 5288548,
        efficientnet_b1b: 7794184,
        efficientnet_b2b: 9109994,
        efficientnet_b3b: 12233232,
        efficientnet_b4b: 19341616,
        efficientnet_b5b: 30389784,
        efficientnet_b6b: 43040704,
        efficientnet_b7b: 66347960,
    }
    batch = 14
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # 224x224 input is used for all variants here, regardless of each
        # model's preferred resolution -- the networks are fully convolutional.
        if is_channels_first(data_format):
            in_shape = (batch, 3, 224, 224)
        else:
            in_shape = (batch, 224, 224, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        if model in expected_weight_counts:
            assert (weight_count == expected_weight_counts[model])
if __name__ == "__main__":
    _test()
| 40,223 | 36.804511 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/pnasnet.py | """
PNASNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
"""
__all__ = ['PNASNet', 'pnasnet5large']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, conv1x1, SimpleSequential, flatten, is_channels_first, get_channel_axis
from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock
class PnasMaxPoolBlock(nn.Layer):
    """
    PNASNet specific max pooling layer with optional extra padding.

    With `extra_padding`, the input is zero-padded by one row/column at the
    top/left before pooling and the first output row/column is cropped away
    afterwards (asymmetric-padding emulation).

    Parameters:
    ----------
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 strides=2,
                 extra_padding=False,
                 data_format="channels_last",
                 **kwargs):
        super(PnasMaxPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        self.data_format = data_format
        # 3x3 pooling window with symmetric padding of 1.
        self.pool = MaxPool2d(
            pool_size=3,
            strides=strides,
            padding=1,
            data_format=data_format,
            name="pool")
        if self.extra_padding:
            # One extra zero row/column at top/left only.
            self.pad = nn.ZeroPadding2D(
                padding=((1, 0), (1, 0)),
                data_format=data_format)
    def call(self, x, training=None):
        if self.extra_padding:
            x = self.pad(x)
        x = self.pool(x)
        if self.extra_padding:
            # Crop the first spatial row/column introduced by the extra padding.
            if is_channels_first(self.data_format):
                x = x[:, :, 1:, 1:]
            else:
                x = x[:, 1:, 1:, :]
        return x
def pnas_conv1x1(in_channels,
                 out_channels,
                 strides=1,
                 data_format="channels_last",
                 **kwargs):
    """
    Build the 1x1 flavour of the PNASNet-specific convolution block
    (a non-grouped pointwise `NasConv` with no spatial padding).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return NasConv(
        kernel_size=1,
        padding=0,
        groups=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        data_format=data_format,
        **kwargs)
class DwsBranch(nn.Layer):
    """
    PNASNet specific block with depthwise separable convolution layers.

    Two stacked depthwise-separable convolutions: the first carries the
    (possibly strided) spatial reduction, the second always has stride 1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the first convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in the stem; then the intermediate channel
        count is taken from `out_channels` instead of `in_channels`.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 extra_padding=False,
                 stem=False,
                 data_format="channels_last",
                 **kwargs):
        super(DwsBranch, self).__init__(**kwargs)
        # Extra padding is not supported for stem branches.
        assert (not stem) or (not extra_padding)
        mid_channels = out_channels if stem else in_channels
        padding = kernel_size // 2  # "same"-style padding for odd kernel sizes
        self.conv1 = NasDwsConv(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            extra_padding=extra_padding,
            data_format=data_format,
            name="conv1")
        self.conv2 = NasDwsConv(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=1,
            padding=padding,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
def dws_branch_k3(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False,
                  stem=False,
                  data_format="channels_last",
                  **kwargs):
    """
    Build a 3x3 PNASNet depthwise-separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in the stem.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        kernel_size=3,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        extra_padding=extra_padding,
        stem=stem,
        data_format=data_format,
        **kwargs)
def dws_branch_k5(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False,
                  stem=False,
                  data_format="channels_last",
                  **kwargs):
    """
    Build a 5x5 PNASNet depthwise-separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used in the stem.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        kernel_size=5,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        extra_padding=extra_padding,
        stem=stem,
        data_format=data_format,
        **kwargs)
def dws_branch_k7(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False,
                  data_format="channels_last",
                  **kwargs):
    """
    Build a 7x7 PNASNet depthwise-separable convolution branch
    (never used in the stem, so `stem` is fixed to False).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DwsBranch(
        kernel_size=7,
        stem=False,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        extra_padding=extra_padding,
        data_format=data_format,
        **kwargs)
class PnasMaxPathBlock(nn.Layer):
    """
    PNASNet specific `max path` auxiliary block:
    max-pool -> 1x1 conv (channel projection) -> batch norm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PnasMaxPathBlock, self).__init__(**kwargs)
        # Default PnasMaxPoolBlock: 3x3 pooling with stride 2.
        self.maxpool = PnasMaxPoolBlock(
            data_format=data_format,
            name="maxpool")
        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.bn = nasnet_batch_norm(
            channels=out_channels,
            data_format=data_format,
            name="bn")
    def call(self, x, training=None):
        x = self.maxpool(x)
        x = self.conv(x)
        x = self.bn(x, training=training)
        return x
class PnasBaseUnit(nn.Layer):
    """
    PNASNet base unit: shared forward logic of the five-branch PNAS cell.

    Subclasses must create the branch layers `comb0_left` .. `comb4_right`
    (where `comb4_right` may be None) before `cell_forward` is called.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(PnasBaseUnit, self).__init__(**kwargs)
        self.data_format = data_format
    def cell_forward(self, x, x_prev, training=None):
        # Guard: the subclass must have built the branch layers.
        assert (hasattr(self, 'comb0_left'))
        x_left = x_prev
        x_right = x
        # Each combination is the sum of a pair of branch outputs.
        x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_left, training=training)
        x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training)
        x2 = self.comb2_left(x_right, training=training) + self.comb2_right(x_right, training=training)
        x3 = self.comb3_left(x2, training=training) + self.comb3_right(x_right, training=training)
        # comb4_right may be None; then the raw right input is used directly.
        x4 = self.comb4_left(x_left, training=training) + (self.comb4_right(x_right, training=training) if
                                                           self.comb4_right else x_right)
        # The five combinations are concatenated along the channel axis.
        x_out = tf.concat([x0, x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
        return x_out
class Stem1Unit(PnasBaseUnit):
    """
    PNASNet Stem1 unit: the first PNAS cell, reducing spatial size by 2.

    The output channel count is split evenly over the five concatenated
    branch combinations produced by `PnasBaseUnit.cell_forward`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        # Bug fix: forward `data_format` to PnasBaseUnit so cell_forward
        # concatenates along the correct channel axis. Previously the base
        # class silently kept its 'channels_last' default, breaking
        # channels_first inputs.
        super(Stem1Unit, self).__init__(data_format=data_format, **kwargs)
        mid_channels = out_channels // 5  # five branch outputs are concatenated
        self.conv_1x1 = pnas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv_1x1")
        self.comb0_left = dws_branch_k5(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = PnasMaxPathBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = dws_branch_k7(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = PnasMaxPoolBlock(
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb2_left")
        self.comb2_right = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="comb2_right")
        self.comb3_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=1,
            data_format=data_format,
            name="comb3_left")
        self.comb3_right = PnasMaxPoolBlock(
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3(
            in_channels=in_channels,
            out_channels=mid_channels,
            stem=True,
            data_format=data_format,
            name="comb4_left")
        self.comb4_right = pnas_conv1x1(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="comb4_right")
    def call(self, x, training=None):
        # The un-projected input plays the role of the "previous" path.
        x_prev = x
        x = self.conv_1x1(x, training=training)
        x_out = self.cell_forward(x, x_prev, training=training)
        return x_out
class PnasUnit(PnasBaseUnit):
    """
    PNASNet ordinary unit (a regular or reduction PNAS cell).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    reduction : bool, default False
        Whether to use reduction (stride-2 branches and a projecting
        `comb4_right`).
    extra_padding : bool, default False
        Whether to use extra padding.
    match_prev_layer_dimensions : bool, default False
        Whether to match previous layer dimensions (spatial size halving of
        the previous path via `NasPathBlock`).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 reduction=False,
                 extra_padding=False,
                 match_prev_layer_dimensions=False,
                 data_format="channels_last",
                 **kwargs):
        # Bug fix: forward `data_format` to PnasBaseUnit so cell_forward
        # concatenates along the correct channel axis. Previously the base
        # class silently kept its 'channels_last' default, breaking
        # channels_first inputs.
        super(PnasUnit, self).__init__(data_format=data_format, **kwargs)
        mid_channels = out_channels // 5  # five branch outputs are concatenated
        stride = 2 if reduction else 1
        if match_prev_layer_dimensions:
            self.conv_prev_1x1 = NasPathBlock(
                in_channels=prev_in_channels,
                out_channels=mid_channels,
                data_format=data_format,
                name="conv_prev_1x1")
        else:
            self.conv_prev_1x1 = pnas_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels,
                data_format=data_format,
                name="conv_prev_1x1")
        self.conv_1x1 = pnas_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv_1x1")
        self.comb0_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_left")
        self.comb0_right = PnasMaxPoolBlock(
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb0_right")
        self.comb1_left = dws_branch_k7(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_left")
        self.comb1_right = PnasMaxPoolBlock(
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb1_right")
        self.comb2_left = dws_branch_k5(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_left")
        self.comb2_right = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb2_right")
        self.comb3_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=1,
            data_format=data_format,
            name="comb3_left")
        self.comb3_right = PnasMaxPoolBlock(
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb3_right")
        self.comb4_left = dws_branch_k3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=stride,
            extra_padding=extra_padding,
            data_format=data_format,
            name="comb4_left")
        if reduction:
            # A strided 1x1 conv keeps the right input spatially aligned.
            self.comb4_right = pnas_conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                data_format=data_format,
                name="comb4_right")
        else:
            # None: cell_forward will use the raw right input for comb4.
            self.comb4_right = None
    def call(self, x, x_prev, training=None):
        x_prev = self.conv_prev_1x1(x_prev, training=training)
        x = self.conv_1x1(x, training=training)
        x_out = self.cell_forward(x, x_prev, training=training)
        return x_out
class PNASNet(tf.keras.Model):
    """
    PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem1_blocks_channels : list of 2 int
        Number of output channels for the Stem1 unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 stem1_blocks_channels,
                 in_channels=3,
                 in_size=(331, 331),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PNASNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Dual-path container: units receive both current and previous outputs;
        # the first 2 and last 2 children are ordinary single-input modules.
        self.features = nasnet_dual_path_sequential(
            return_two=False,
            first_ordinals=2,
            last_ordinals=2,
            name="features")
        self.features.add(NASNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        self.features.add(Stem1Unit(
            in_channels=in_channels,
            out_channels=stem1_blocks_channels,
            data_format=data_format,
            name="stem1_unit"))
        prev_in_channels = in_channels
        in_channels = stem1_blocks_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nasnet_dual_path_sequential(
                name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # First unit of every stage is a reduction unit.
                reduction = (j == 0)
                # Extra padding only for the reduction unit of the middle stage(s).
                extra_padding = (j == 0) and (i not in [0, 2])
                # The previous path needs dimension matching right after a
                # reduction, and for the very first unit after the stem.
                match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0))
                stage.add(PnasUnit(
                    in_channels=in_channels,
                    prev_in_channels=prev_in_channels,
                    out_channels=out_channels,
                    reduction=reduction,
                    extra_padding=extra_padding,
                    match_prev_layer_dimensions=match_prev_layer_dimensions,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                prev_in_channels = in_channels
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.ReLU(name="activ"))
        # 11x11 pooling matches the final feature-map size for the default
        # 331x331 input -- presumably; confirm for other input sizes.
        self.features.add(nn.AveragePooling2D(
            pool_size=11,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classifier head: dropout + fully-connected layer.
        self.output1 = SimpleSequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=0.5,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_pnasnet(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create PNASNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cells_per_stage = 4
    init_block_channels = 96
    stem_channels = (270, 540)
    stage_channels = (1080, 2160, 4320)
    channels = [[c] * cells_per_stage for c in stage_channels]
    # The second stem cell is folded in as the first unit of stage 1.
    channels[0].insert(0, stem_channels[1])
    net = PNASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        stem1_blocks_channels=stem_channels[0],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build the variables before loading weights into them.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def pnasnet5large(**kwargs):
    """
    Construct the PNASNet-5-Large model from 'Progressive Neural Architecture Search,'
    https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pnasnet(
        model_name="pnasnet5large",
        **kwargs)
def _test():
    """Smoke-test PNASNet-5-Large: forward pass shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    batch = 14
    for model in [pnasnet5large]:
        net = model(pretrained=pretrained, data_format=data_format)
        if is_channels_first(data_format):
            in_shape = (batch, 3, 331, 331)
        else:
            in_shape = (batch, 331, 331, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != pnasnet5large or weight_count == 86057668)
if __name__ == "__main__":
    _test()
| 23,512 | 31.253772 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/efficientnetedge.py | """
EfficientNet-Edge for ImageNet-1K, implemented in TensorFlow.
Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
"""
__all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock, SimpleSequential, is_channels_first
from .efficientnet import EffiInvResUnit, EffiInitBlock
class EffiEdgeResUnit(nn.Layer):
    """
    EfficientNet-Edge edge residual unit: a 3x3 expansion convolution followed
    by a (possibly strided) 1x1 projection, with an optional SE block and an
    identity shortcut when shapes allow.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit (0 disables SE).
    mid_from_in : bool
        Whether to use input channel count for middle channel count calculation.
    use_skip : bool
        Whether to use skip connection.
    bn_eps : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 exp_factor,
                 se_factor,
                 mid_from_in,
                 use_skip,
                 bn_eps,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(EffiEdgeResUnit, self).__init__(**kwargs)
        # Identity shortcut is only valid when input/output shapes match.
        self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor
        # Edge variant: the expansion convolution is a full 3x3 (not pointwise).
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv1")
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                mid_activation=activation,
                data_format=data_format,
                name="se")
        # Note: the stride is applied on the 1x1 projection, not on conv1.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=strides,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        if self.residual:
            identity = x
        x = self.conv1(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x, training=training)
        if self.residual:
            x = x + identity
        return x
class EfficientNetEdge(tf.keras.Model):
    """
    EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(EfficientNetEdge, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Edge variants use plain ReLU everywhere (instead of swish),
        # which is friendlier to edge accelerators.
        activation = "relu"
        self.features = SimpleSequential(name="features")
        self.features.add(EffiInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            tf_mode=tf_mode,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            kernel_sizes_per_stage = kernel_sizes[i]
            expansion_factors_per_stage = expansion_factors[i]
            # In the first stage the expanded width is derived from the unit's
            # output channels and skip connections are disabled.
            mid_from_in = (i != 0)
            use_skip = (i != 0)
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                kernel_size = kernel_sizes_per_stage[j]
                expansion_factor = expansion_factors_per_stage[j]
                # Only the first unit of a stage may downsample.
                strides = strides_per_stage[i] if (j == 0) else 1
                if i < 3:
                    # Early stages use edge residual units (no SE: se_factor=0).
                    stage.add(EffiEdgeResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        exp_factor=expansion_factor,
                        se_factor=0,
                        mid_from_in=mid_from_in,
                        use_skip=use_skip,
                        bn_eps=bn_eps,
                        activation=activation,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    # Later stages use standard inverted residual units, still without SE.
                    stage.add(EffiInvResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        strides=strides,
                        exp_factor=expansion_factor,
                        se_factor=0,
                        bn_eps=bn_eps,
                        activation=activation,
                        tf_mode=tf_mode,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # Global average pooling makes the classifier input 1x1 regardless of in_size.
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))
        self.output1 = SimpleSequential(name="output1")
        # Dropout is optional; the edge configurations in this file pass rate 0.0.
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc"))
    def call(self, x, training=None):
        # The feature extractor already ends with global pooling, so no flatten is needed.
        x = self.features(x, training=training)
        x = self.output1(x)
        return x
def get_efficientnet_edge(version,
                          in_size,
                          tf_mode=False,
                          bn_eps=1e-5,
                          model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".tensorflow", "models"),
                          **kwargs):
    """
    Create EfficientNet-Edge model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of EfficientNet ('small', 'medium', 'large').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    dropout_rate = 0.0
    if version == "small":
        assert (in_size == (224, 224))
        depth_factor = 1.0
        width_factor = 1.0
        # dropout_rate = 0.2
    elif version == "medium":
        assert (in_size == (240, 240))
        depth_factor = 1.1
        width_factor = 1.0
        # dropout_rate = 0.2
    elif version == "large":
        assert (in_size == (300, 300))
        depth_factor = 1.4
        width_factor = 1.2
        # dropout_rate = 0.3
    else:
        raise ValueError("Unsupported EfficientNet-Edge version {}".format(version))
    # Base (unscaled) architecture configuration.
    init_block_channels = 32
    layers = [1, 2, 4, 5, 4, 2]
    downsample = [1, 1, 1, 1, 0, 1]
    channels_per_layers = [24, 32, 48, 96, 144, 192]
    expansion_factors_per_layers = [4, 8, 8, 8, 8, 8]
    kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5]
    strides_per_stage = [1, 2, 2, 2, 1, 2]
    final_block_channels = 1280
    # Apply compound scaling to depth and width before expansion.
    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]

    def expand_to_stages(values):
        # Replicate each per-layer value `layers[i]` times and group the
        # result into stages: a new stage starts wherever `downsample` is
        # non-zero, otherwise the units are merged into the previous stage.
        # (Replaces four identical copies of a reduce(lambda ...) pipeline.)
        stages = []
        for value, count, ds in zip(values, layers, downsample):
            if ds != 0:
                stages.append([value] * count)
            else:
                stages[-1] = stages[-1] + [value] * count
        return stages

    channels = expand_to_stages(channels_per_layers)
    kernel_sizes = expand_to_stages(kernel_sizes_per_layers)
    expansion_factors = expand_to_stages(expansion_factors_per_layers)
    # Only the stride of the first unit in each stage matters.
    strides_per_stage = [si[0] for si in expand_to_stages(strides_per_stage)]
    init_block_channels = round_channels(init_block_channels * width_factor)
    if width_factor > 1.0:
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
        final_block_channels = round_channels(final_block_channels * width_factor)
    net = EfficientNetEdge(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_eps=bn_eps,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def efficientnet_edge_small_b(in_size=(224, 224), **kwargs):
    """
    EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="small",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_small_b",
        **kwargs)
def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs):
    """
    EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="medium",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_medium_b",
        **kwargs)
def efficientnet_edge_large_b(in_size=(300, 300), **kwargs):
    """
    EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_efficientnet_edge(
        version="large",
        in_size=in_size,
        tf_mode=True,
        bn_eps=1e-3,
        model_name="efficientnet_edge_large_b",
        **kwargs)
def _test():
    """Smoke-test the EfficientNet-Edge builders: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts per builder.
    expected_counts = {
        efficientnet_edge_small_b: 5438392,
        efficientnet_edge_medium_b: 6899496,
        efficientnet_edge_large_b: 10589712,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(int(np.prod(K.get_value(w).shape)) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
if __name__ == "__main__":
    _test()
| 15,845 | 37 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/ibnresnext.py | """
IBN-ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['IBNResNeXt', 'ibn_resnext50_32x4d', 'ibn_resnext101_32x4d', 'ibn_resnext101_64x4d']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten, is_channels_first
from .resnet import ResInitBlock
from .ibnresnet import ibn_conv1x1_block
class IBNResNeXtBottleneck(nn.Layer):
    """
    IBN-ResNeXt bottleneck block for residual path in IBN-ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResNeXtBottleneck, self).__init__(**kwargs)
        # Per-group depth follows the ResNeXt convention: scale the bottleneck
        # width relative to the reference width of 64.
        bottleneck_channels = out_channels // 4
        group_depth = int(math.floor(bottleneck_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * group_depth
        self.conv1 = ibn_conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width,
            use_ibn=conv1_ibn,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            strides=strides,
            groups=cardinality,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        """Apply the 1x1 (IBN) -> grouped 3x3 -> 1x1 bottleneck pipeline."""
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x, training=training)
        return x
class IBNResNeXtUnit(nn.Layer):
    """
    IBN-ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResNeXtUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = IBNResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            conv1_ibn=conv1_ibn,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        """Residual unit: bottleneck body plus (optionally projected) identity."""
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        out = self.body(x, training=training)
        return self.activ(out + identity)
class IBNResNeXt(tf.keras.Model):
    """
    IBN-ResNeXt model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IBNResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # IBN is applied only while the channel count is below 2048,
                # i.e. everywhere except the last stage.
                conv1_ibn = (out_channels < 2048)
                stage.add(IBNResNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    conv1_ibn=conv1_ibn,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # pool_size=7 matches the 7x7 feature map produced by a 224x224 input.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        # Extract features, flatten the pooled map, then classify.
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_ibnresnext(blocks,
                   cardinality,
                   bottleneck_width,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create IBN-ResNeXt model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported IBN-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = IBNResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def ibn_resnext50_32x4d(**kwargs):
    """
    IBN-ResNeXt-50 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="ibn_resnext50_32x4d",
        **kwargs)
def ibn_resnext101_32x4d(**kwargs):
    """
    IBN-ResNeXt-101 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="ibn_resnext101_32x4d",
        **kwargs)
def ibn_resnext101_64x4d(**kwargs):
    """
    IBN-ResNeXt-101 (64x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="ibn_resnext101_64x4d",
        **kwargs)
def _test():
    """Smoke-test the IBN-ResNeXt builders: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts per builder.
    expected_counts = {
        ibn_resnext50_32x4d: 25028904,
        ibn_resnext101_32x4d: 44177704,
        ibn_resnext101_64x4d: 83455272,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(int(np.prod(K.get_value(w).shape)) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
if __name__ == "__main__":
    _test()
| 12,035 | 32.620112 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/squeezenext.py | """
SqueezeNext for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import ConvBlock, conv1x1_block, conv7x7_block, MaxPool2d, SimpleSequential, flatten
class SqnxtUnit(nn.Layer):
    """
    SqueezeNext unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(SqnxtUnit, self).__init__(**kwargs)
        # `reduction_den` controls how much conv1/conv2 squeeze the channel
        # count; `resize_identity` says whether the shortcut needs a 1x1
        # projection to match the body's output shape.
        if strides == 2:
            # Downsampling unit: no channel reduction, projected shortcut.
            reduction_den = 1
            self.resize_identity = True
        elif in_channels > out_channels:
            # Channel-reducing unit: strongest (4x) bottleneck, projected shortcut.
            reduction_den = 4
            self.resize_identity = True
        else:
            # Regular unit: 2x bottleneck, identity shortcut.
            reduction_den = 2
            self.resize_identity = False
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=(in_channels // reduction_den),
            strides=strides,
            use_bias=True,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1_block(
            in_channels=(in_channels // reduction_den),
            out_channels=(in_channels // (2 * reduction_den)),
            use_bias=True,
            data_format=data_format,
            name="conv2")
        # conv3/conv4 form a separable 3x3 as a 1x3 followed by a 3x1.
        self.conv3 = ConvBlock(
            in_channels=(in_channels // (2 * reduction_den)),
            out_channels=(in_channels // reduction_den),
            kernel_size=(1, 3),
            strides=1,
            padding=(0, 1),
            use_bias=True,
            data_format=data_format,
            name="conv3")
        self.conv4 = ConvBlock(
            in_channels=(in_channels // reduction_den),
            out_channels=(in_channels // reduction_den),
            kernel_size=(3, 1),
            strides=1,
            padding=(1, 0),
            use_bias=True,
            data_format=data_format,
            name="conv4")
        self.conv5 = conv1x1_block(
            in_channels=(in_channels // reduction_den),
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv5")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=True,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        # Five-conv body plus (optionally projected) residual shortcut.
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.conv4(x, training=training)
        x = self.conv5(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class SqnxtInitBlock(nn.Layer):
    """
    SqueezeNext specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(SqnxtInitBlock, self).__init__(**kwargs)
        # Stem: strided 7x7 convolution followed by a ceil-mode 3x3/2 max-pool.
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            padding=1,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            ceil_mode=True,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        """Apply the stem convolution, then the pooling."""
        return self.pool(self.conv(x, training=training))
class SqueezeNext(tf.keras.Model):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNext, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(SqnxtInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SqnxtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            use_bias=True,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # pool_size=7 matches the 7x7 feature map produced by a 224x224 input.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        # Extract features, flatten the pooled map, then classify.
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SqueezeNext ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]
    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        # Bug fix: the message previously said "SqueezeNet", which is a
        # different architecture; report the actual model family.
        raise ValueError("Unsupported SqueezeNext version {}".format(version))
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1:
        # Scale every width; truncation via int() matches the reference nets.
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)
    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.0,
        model_name="sqnxt23_w1",
        **kwargs)
def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.5,
        model_name="sqnxt23_w3d2",
        **kwargs)
def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=2.0,
        model_name="sqnxt23_w2",
        **kwargs)
def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.0,
        model_name="sqnxt23v5_w1",
        **kwargs)
def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.5,
        model_name="sqnxt23v5_w3d2",
        **kwargs)
def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=2.0,
        model_name="sqnxt23v5_w2",
        **kwargs)
def _test():
    """Smoke-test the SqueezeNext builders: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    pretrained = False
    # Expected trainable-parameter counts per builder.
    expected_counts = {
        sqnxt23_w1: 724056,
        sqnxt23_w3d2: 1511824,
        sqnxt23_w2: 2583752,
        sqnxt23v5_w1: 921816,
        sqnxt23v5_w3d2: 1953616,
        sqnxt23v5_w2: 3366344,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        batch = 14
        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(int(np.prod(K.get_value(w).shape)) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
if __name__ == "__main__":
    _test()
| 13,713 | 32.367397 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/grmiposelite_coco.py | """
GRMIPose (Google PoseNet) for COCO Keypoint, implemented in TensorFlow (Lite).
Original paper: 'Towards Accurate Multi-person Pose Estimation in the Wild,' https://arxiv.org/abs/1701.01779.
"""
__all__ = ['GRMIPoseLite', 'grmiposelite_mobilenet_w1_coco']
import math
import numpy as np
import tensorflow as tf
class GRMIPoseLite(tf.keras.Model):
    """
    GRMIPose (Google PoseNet) model from 'Towards Accurate Multi-person Pose Estimation in the Wild,'
    https://arxiv.org/abs/1701.01779.
    Parameters:
    ----------
    interpreter : obj
        Instance of the TFLite model interpreter.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (257, 257)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 interpreter,
                 in_channels=3,
                 in_size=(257, 257),
                 keypoints=17,
                 data_format="channels_last",
                 **kwargs):
        super(GRMIPoseLite, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.data_format = data_format
        self.interpreter = interpreter
        self.interpreter.allocate_tensors()
        input_details = self.interpreter.get_input_details()
        self.input_tensor_index = input_details[0]["index"]
        # Interpreter's expected input shape, e.g. (1, H, W, 3); must match in_size.
        self.in_shape = tuple(input_details[0]["shape"])
        assert (self.in_size == self.in_shape[1:3])
        self.output_tensor_index_list = [i["index"] for i in self.interpreter.get_output_details()]

    def call(self, x, training=None):
        """
        Run the TFLite interpreter on a single image batch and decode keypoints.

        Parameters:
        ----------
        x : tensor
            Input image batch matching the interpreter's input shape.

        Returns:
        -------
        tensor
            Shape (1, keypoints, 3): decoded (x, y, confidence) per keypoint.
        """
        x_np = x.numpy()
        assert (x_np.shape == self.in_shape)
        self.interpreter.set_tensor(self.input_tensor_index, x_np)
        self.interpreter.invoke()
        # Outputs: per-keypoint score heatmap and (dy, dx) offset refinements.
        heatmap = self.interpreter.get_tensor(self.output_tensor_index_list[0])
        offsets = self.interpreter.get_tensor(self.output_tensor_index_list[1])
        pts = np.zeros((self.keypoints, 3), np.float32)
        oh, ow = heatmap.shape[1:3]
        # Map heatmap cell indices back to input-image coordinates.
        fh = self.in_size[0] / (oh - 1)
        fw = self.in_size[1] / (ow - 1)
        for k in range(self.keypoints):
            # Bug fix: the previous implementation seeded its running maximum
            # with heatmap[0, 0, 0, 0] (channel 0) instead of channel k, so
            # the argmax could silently stick at (0, 0) with a wrong score.
            # np.argmax over the k-th channel is both correct and replaces the
            # O(oh*ow) Python double loop; ties still resolve to the first
            # position in row-major order, matching the old strict-`>` scan.
            hm_k = heatmap[0, :, :, k]
            max_i, max_j = np.unravel_index(int(np.argmax(hm_k)), hm_k.shape)
            pts[k, 0] = max_i * fh + offsets[0, max_i, max_j, k]
            pts[k, 1] = max_j * fw + offsets[0, max_i, max_j, k + self.keypoints]
            pts[k, 2] = self.sigmoid(hm_k[max_i, max_j])
        # Swap (row, col) -> (x, y).
        # NOTE(review): the 0.25 factor looks like an output-stride rescale
        # expected by downstream consumers — confirm against the caller.
        pts1 = pts.copy()
        for k in range(self.keypoints):
            pts1[k, 0] = 0.25 * pts[k, 1]
            pts1[k, 1] = 0.25 * pts[k, 0]
        y = tf.convert_to_tensor(np.expand_dims(pts1, axis=0))
        return y

    @staticmethod
    def sigmoid(x):
        """Logistic function, used to turn raw heatmap scores into confidences."""
        return 1.0 / (1.0 + math.exp(-x))
def get_grmiposelite(model_path,
                     keypoints,
                     model_name=None,
                     data_format="channels_last",
                     pretrained=False,
                     **kwargs):
    """
    Create GRMIPose (Google PoseNet) model with specific parameters.

    Parameters:
    ----------
    model_path : str
        Path to pretrained model (TFLite flatbuffer).
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.

    Returns:
    -------
    GRMIPoseLite
        The constructed model wrapping a TFLite interpreter.
    """
    # Explicit validation instead of `assert`: asserts are stripped under `python -O`,
    # which would silently skip these checks.
    if pretrained is None:
        raise ValueError("Parameter `pretrained` must not be None.")
    if model_name is None:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    if (model_path is None) or (not model_path):
        raise ValueError("Parameter `model_path` should be properly initialized for loading pretrained model.")
    interpreter = tf.lite.Interpreter(model_path=model_path)
    net = GRMIPoseLite(
        interpreter=interpreter,
        keypoints=keypoints,
        data_format=data_format,
        **kwargs)
    return net
def grmiposelite_mobilenet_w1_coco(model_path, keypoints=17, data_format="channels_last", pretrained=False, **kwargs):
    """
    GRMIPose (Google PoseNet) on a 1.0 MobileNet-224 backbone for COCO Keypoint, from
    'Towards Accurate Multi-person Pose Estimation in the Wild,' https://arxiv.org/abs/1701.01779.

    Parameters:
    ----------
    model_path : str
        Path to pretrained model.
    keypoints : int, default 17
        Number of keypoints (COCO uses 17).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_grmiposelite(
        model_path=model_path,
        keypoints=keypoints,
        model_name="grmiposelite_mobilenet_w1_coco",
        data_format=data_format,
        pretrained=pretrained,
        **kwargs)
def _test():
    """Smoke-test: build each model and check the decoded keypoint tensor shape."""
    data_format = "channels_last"
    in_size = (257, 257)
    keypoints = 17
    pretrained = False
    model_path = ""

    models = [
        grmiposelite_mobilenet_w1_coco,
    ]

    for create_model in models:
        net = create_model(model_path=model_path, pretrained=pretrained, in_size=in_size, data_format=data_format)
        batch = 1
        x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        y = net(x)
        # Expect one row of (x, y, score) per keypoint.
        assert (y.shape[0] == batch)
        assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))


if __name__ == "__main__":
    _test()
| 6,726 | 32.137931 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/bisenet.py | """
BiSeNet for CelebAMask-HQ, implemented in TensorFlow.
Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1808.00897.
"""
__all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential, get_channel_axis,\
get_im_size, is_channels_first
from .resnet import resnet18
class PyramidPoolingZeroBranch(nn.Layer):
    """
    Zero (global-context) branch of the pyramid pooling module: global average
    pooling, a 1x1 conv, then bilinear upsampling back to the feature-map size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int
        Spatial size of output image for the upsampling operation.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingZeroBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.up = InterpolationBlock(
            scale_factor=None,
            interpolation="bilinear",
            data_format=data_format,
            name="up")

    def call(self, x, training=None):
        # Fall back to the dynamic image size when no fixed size was configured.
        if self.in_size is not None:
            in_size = self.in_size
        else:
            in_size = get_im_size(x, data_format=self.data_format)
        x = self.pool(x)
        # Global pooling collapsed the spatial dims; restore two singleton axes so
        # the tensor is 4D again before the 1x1 convolution.
        spatial_axis = -1 if is_channels_first(self.data_format) else 1
        x = tf.expand_dims(tf.expand_dims(x, axis=spatial_axis), axis=spatial_axis)
        x = self.conv(x, training=training)
        x = self.up(x, size=in_size)
        return x
class AttentionRefinementBlock(nn.Layer):
    """
    Attention refinement block: a 3x3 conv followed by a channel-wise gate
    computed from globally pooled statistics (sigmoid-activated 1x1 conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(AttentionRefinementBlock, self).__init__(**kwargs)
        self.data_format = data_format

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv1")
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv2 = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            activation="sigmoid",
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        attn = self.pool(x)
        # Re-expand pooled stats to 4D so they broadcast over the spatial dims.
        spatial_axis = -1 if is_channels_first(self.data_format) else 1
        attn = tf.expand_dims(tf.expand_dims(attn, axis=spatial_axis), axis=spatial_axis)
        attn = self.conv2(attn, training=training)
        return x * attn
class PyramidPoolingMainBranch(nn.Layer):
    """
    Main branch of the pyramid pooling module: attention-refine the lateral
    features, add the coarser branch, upsample, then smooth with a 3x3 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : float
        Multiplier for spatial size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingMainBranch, self).__init__(**kwargs)
        self.att = AttentionRefinementBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="att")
        self.up = InterpolationBlock(
            scale_factor=scale_factor,
            interpolation="bilinear",
            data_format=data_format,
            name="up")
        self.conv = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")

    def call(self, x, y, training=None):
        x = self.att(x, training=training)
        # Merge with the already-upsampled coarser branch, then grow spatially.
        x = self.up(x + y)
        return self.conv(x, training=training)
class FeatureFusion(nn.Layer):
    """
    Feature fusion block: concatenate two inputs, merge with a 1x1 conv, and
    add a squeeze-excitation-style gated residual on top of the merged tensor.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 4
        Squeeze reduction value.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=4,
                 data_format="channels_last",
                 **kwargs):
        super(FeatureFusion, self).__init__(**kwargs)
        self.data_format = data_format
        squeeze_channels = out_channels // reduction

        self.conv_merge = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv_merge")
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv1 = conv1x1(
            in_channels=out_channels,
            out_channels=squeeze_channels,
            data_format=data_format,
            name="conv1")
        self.activ = nn.ReLU()
        self.conv2 = conv1x1(
            in_channels=squeeze_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")
        self.sigmoid = tf.nn.sigmoid

    def call(self, x, y, training=None):
        x = tf.concat([x, y], axis=get_channel_axis(self.data_format))
        x = self.conv_merge(x, training=training)
        # Squeeze: pool to per-channel stats, re-expand to broadcastable 4D.
        w = self.pool(x)
        spatial_axis = -1 if is_channels_first(self.data_format) else 1
        w = tf.expand_dims(tf.expand_dims(w, axis=spatial_axis), axis=spatial_axis)
        # Excite: bottleneck MLP (1x1 convs) with a sigmoid gate.
        w = self.sigmoid(self.conv2(self.activ(self.conv1(w))))
        # Gated residual on top of the merged features.
        return x + x * w
class PyramidPooling(nn.Layer):
    """
    Pyramid Pooling module.
    Combines backbone features at strides 8/16/32 via a global-context branch
    (pool32), two main branches (pool16, pool8), and a final feature fusion.
    Parameters:
    ----------
    x16_in_channels : int
        Number of input channels for x16.
    x32_in_channels : int
        Number of input channels for x32.
    y_out_channels : int
        Number of output channels for y-outputs.
    y32_out_size : tuple of 2 int
        Spatial size of the y32 tensor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 x16_in_channels,
                 x32_in_channels,
                 y_out_channels,
                 y32_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        # Fusion concatenates x8 with y8, hence twice the branch width.
        z_out_channels = 2 * y_out_channels
        self.pool32 = PyramidPoolingZeroBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            in_size=y32_out_size,
            data_format=data_format,
            name="pool32")
        # NOTE: pool16 consumes x32 (see call), so its in_channels is x32's.
        self.pool16 = PyramidPoolingMainBranch(
            in_channels=x32_in_channels,
            out_channels=y_out_channels,
            scale_factor=2,
            data_format=data_format,
            name="pool16")
        self.pool8 = PyramidPoolingMainBranch(
            in_channels=x16_in_channels,
            out_channels=y_out_channels,
            scale_factor=2,
            data_format=data_format,
            name="pool8")
        self.fusion = FeatureFusion(
            in_channels=z_out_channels,
            out_channels=z_out_channels,
            data_format=data_format,
            name="fusion")
    def call(self, x8, x16, x32, training=None):
        # Top-down pathway: global context, then progressively finer merges.
        y32 = self.pool32(x32, training=training)
        y16 = self.pool16(x32, y32, training=training)
        y8 = self.pool8(x16, y16, training=training)
        z8 = self.fusion(x8, y8, training=training)
        # y8/y16 are returned for the auxiliary heads in BiSeNet.
        return z8, y8, y16
class BiSeHead(nn.Layer):
    """
    BiSeNet head (final) block: a 3x3 conv followed by a 1x1 classifier conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels (classes).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(BiSeHead, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        # conv2 is a plain 1x1 conv (no BN), so it takes no training flag.
        return self.conv2(self.conv1(x, training=training))
class BiSeNet(tf.keras.Model):
    """
    BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1808.00897.
    Parameters:
    ----------
    backbone : func -> nn.Sequential
        Feature extractor factory; must return (features, out_channels) where
        out_channels lists the widths of the three tapped outputs (x8, x16, x32).
    aux : bool, default True
        Whether to output an auxiliary results.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 480)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 aux=True,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(640, 480),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(BiSeNet, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.aux = aux
        self.fixed_size = fixed_size
        # Backbone returns the feature extractor plus the channel counts of its
        # three multi-scale outputs.
        self.backbone, backbone_out_channels = backbone(
            data_format=data_format,
            name="backbone")
        y_out_channels = backbone_out_channels[0]
        # Fusion output is double-width (x8 concatenated with y8 inside PyramidPooling).
        z_out_channels = 2 * y_out_channels
        # Stride-32 spatial size; only known statically when the input size is fixed.
        y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
        self.pool = PyramidPooling(
            x16_in_channels=backbone_out_channels[1],
            x32_in_channels=backbone_out_channels[2],
            y_out_channels=y_out_channels,
            y32_out_size=y32_out_size,
            data_format=data_format,
            name="pool")
        # Main segmentation head at stride 8.
        self.head_z8 = BiSeHead(
            in_channels=z_out_channels,
            mid_channels=z_out_channels,
            out_channels=classes,
            data_format=data_format,
            name="head_z8")
        self.up8 = InterpolationBlock(
            scale_factor=(8 if fixed_size else None),
            data_format=data_format,
            name="up8")
        if self.aux:
            # Auxiliary (deep-supervision) heads at strides 8 and 16.
            mid_channels = y_out_channels // 2
            self.head_y8 = BiSeHead(
                in_channels=y_out_channels,
                mid_channels=mid_channels,
                out_channels=classes,
                data_format=data_format,
                name="head_y8")
            self.head_y16 = BiSeHead(
                in_channels=y_out_channels,
                mid_channels=mid_channels,
                out_channels=classes,
                data_format=data_format,
                name="head_y16")
            self.up16 = InterpolationBlock(
                scale_factor=(16 if fixed_size else None),
                data_format=data_format,
                name="up16")
    def call(self, x, training=None):
        # Spatial dims must be divisible by 32 (deepest backbone stride).
        assert is_channels_first(self.data_format) or ((x.shape[1] % 32 == 0) and (x.shape[2] % 32 == 0))
        assert (not is_channels_first(self.data_format)) or ((x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0))
        x8, x16, x32 = self.backbone(x, training=training)
        z8, y8, y16 = self.pool(x8, x16, x32, training=training)
        z8 = self.head_z8(z8, training=training)
        z8 = self.up8(z8)
        if self.aux:
            y8 = self.head_y8(y8, training=training)
            y16 = self.head_y16(y16, training=training)
            y8 = self.up8(y8)
            y16 = self.up16(y16)
            # Main output first, then the two auxiliary maps.
            return z8, y8, y16
        else:
            return z8
def get_bisenet(model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create BiSeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    BiSeNet
        The constructed network, with weights loaded when `pretrained` is set.
    """
    net = BiSeNet(**kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # The model must be built before weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, classes=19, **kwargs):
    """
    BiSeNet on a ResNet-18 backbone for face segmentation on CelebAMask-HQ, from 'BiSeNet:
    Bilateral Segmentation Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    def backbone(**bb_kwargs):
        # Take ResNet-18 features, drop the final pooling, and tap the last
        # three stages as multi-scale outputs.
        backbone_raw = resnet18(pretrained=pretrained_backbone, **bb_kwargs).features
        del backbone_raw.children[-1]
        features = MultiOutputSequential(return_last=False, name="backbone")
        features.add(backbone_raw.children[0])
        for stage_idx, stage in enumerate(backbone_raw.children[1:]):
            if stage_idx != 0:
                stage.do_output = True
            features.add(stage)
        out_channels = [128, 256, 512]
        return features, out_channels

    return get_bisenet(backbone=backbone, classes=classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs)
def _test():
    """Smoke-test BiSeNet: forward shape checks and trainable parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (640, 480)
    aux = True
    pretrained = False

    models = [
        bisenet_resnet18_celebamaskhq,
    ]

    for model_fn in models:
        net = model_fn(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        ys = net(x)
        # With aux=True the model returns (main, aux8, aux16).
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == 19) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == 19) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model_fn.__name__, weight_count))
        if aux:
            assert (model_fn != bisenet_resnet18_celebamaskhq or weight_count == 13300416)
        else:
            assert (model_fn != bisenet_resnet18_celebamaskhq or weight_count == 13150272)


if __name__ == "__main__":
    _test()
| 17,516 | 32.429389 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnet.py | """
ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock', 'get_resnet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, conv7x7_block, MaxPool2d, SimpleSequential, flatten, is_channels_first
class ResBlock(nn.Layer):
    """
    Simple ResNet block for the residual path in a ResNet unit: two 3x3 convs;
    the second has no activation (the ReLU is applied after the residual add
    inside ResUnit).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 data_format="channels_last",
                 **kwargs):
        super(ResBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            activation=None,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        return self.conv2(self.conv1(x, training=training), training=training)
class ResBottleneck(nn.Layer):
    """
    ResNet bottleneck block for the residual path in a ResNet unit:
    1x1 reduce -> 3x3 -> 1x1 expand; the last conv has no activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer.
    conv1_stride : bool, default False
        Whether to apply the stride in the first (True) or the second (False)
        convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 conv1_stride=False,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(ResBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=(strides if conv1_stride else 1),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=(1 if conv1_stride else strides),
            padding=padding,
            dilation=dilation,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return self.conv3(x, training=training)
class ResUnit(nn.Layer):
    """
    ResNet unit with residual connection: body (simple or bottleneck block),
    an optional 1x1 projection on the identity path, addition, then ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bottleneck=True,
                 conv1_stride=False,
                 data_format="channels_last",
                 **kwargs):
        super(ResUnit, self).__init__(**kwargs)
        # A projection is needed whenever the identity can't be added as-is.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        x = self.body(x, training=training)
        return self.activ(x + identity)
class ResInitBlock(nn.Layer):
    """
    ResNet specific initial block: strided 7x7 conv followed by 3x3 max-pool,
    giving an overall stride of 4.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ResInitBlock, self).__init__(**kwargs)
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(self.conv(x, training=training))
class ResNet(tf.keras.Model):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 average pool matches the stride-32 map of a 224x224 input.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): `input_dim` is a legacy Keras kwarg; the classifier is
        # named "output1" presumably to avoid clashing with Model attributes --
        # both affect checkpoint compatibility, so they are kept as-is.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Flatten the pooled feature map before the dense classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # Standard heuristic: depths of 50 and above use bottleneck units.
        bottleneck = (blocks >= 50)

    # Per-stage unit counts that do not depend on the unit type.
    layers_by_depth = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    # Depths 14, 26 and 38 differ between plain and bottleneck variants.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in layers_by_depth:
        layers = layers_by_depth[blocks]
    else:
        raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))

    # Sanity check: units account for the depth (3 convs per bottleneck unit,
    # 2 per simple unit, plus init block and classifier).
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every width except the final unit of the final stage.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # The model must be built before weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def resnet10(**kwargs):
    """
    ResNet-10 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet10", blocks=10, **kwargs)
def resnet12(**kwargs):
    """
    ResNet-12 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet12", blocks=12, **kwargs)
def resnet14(**kwargs):
    """
    ResNet-14 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet14", blocks=14, **kwargs)
def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model (bottleneck compressed, stride in the 3x3 conv) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnetbc14b", blocks=14, bottleneck=True, conv1_stride=False, **kwargs)
def resnet16(**kwargs):
    """
    ResNet-16 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet16", blocks=16, **kwargs)
def resnet18_wd4(**kwargs):
    """
    ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet18_wd4", blocks=18, width_scale=0.25, **kwargs)
def resnet18_wd2(**kwargs):
    """
    ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet18_wd2", blocks=18, width_scale=0.5, **kwargs)
def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)
def resnet18(**kwargs):
    """
    ResNet-18 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet18", blocks=18, **kwargs)
def resnet26(**kwargs):
    """
    ResNet-26 model (plain, non-bottleneck units) from 'Deep Residual Learning for
    Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnet26", blocks=26, bottleneck=False, **kwargs)
def resnetbc26b(**kwargs):
    """
    ResNet-BC-26b model (bottleneck compressed, stride in the 3x3 conv) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Keyword arguments (`pretrained`, `root`, ...) are forwarded to `get_resnet`.
    """
    return get_resnet(model_name="resnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)
def resnet34(**kwargs):
    """
    ResNet-34 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=34, model_name="resnet34")
    return get_resnet(**cfg, **kwargs)
def resnetbc38b(**kwargs):
    """
    ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b")
    return get_resnet(**cfg, **kwargs)
def resnet50(**kwargs):
    """
    ResNet-50 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=50, model_name="resnet50")
    return get_resnet(**cfg, **kwargs)
def resnet50b(**kwargs):
    """
    ResNet-50 model with stride at the second convolution in bottleneck block from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=50, conv1_stride=False, model_name="resnet50b")
    return get_resnet(**cfg, **kwargs)
def resnet101(**kwargs):
    """
    ResNet-101 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=101, model_name="resnet101")
    return get_resnet(**cfg, **kwargs)
def resnet101b(**kwargs):
    """
    ResNet-101 model with stride at the second convolution in bottleneck block from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=101, conv1_stride=False, model_name="resnet101b")
    return get_resnet(**cfg, **kwargs)
def resnet152(**kwargs):
    """
    ResNet-152 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=152, model_name="resnet152")
    return get_resnet(**cfg, **kwargs)
def resnet152b(**kwargs):
    """
    ResNet-152 model with stride at the second convolution in bottleneck block from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=152, conv1_stride=False, model_name="resnet152b")
    return get_resnet(**cfg, **kwargs)
def resnet200(**kwargs):
    """
    ResNet-200 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=200, model_name="resnet200")
    return get_resnet(**cfg, **kwargs)
def resnet200b(**kwargs):
    """
    ResNet-200 model with stride at the second convolution in bottleneck block from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=200, conv1_stride=False, model_name="resnet200b")
    return get_resnet(**cfg, **kwargs)
def _test():
    """Smoke-test every ResNet variant: forward pass shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Expected trainable-parameter count for each model constructor.
    expected_weights = {
        resnet10: 5418792,
        resnet12: 5492776,
        resnet14: 5788200,
        resnetbc14b: 10064936,
        resnet16: 6968872,
        resnet18_wd4: 3937400,
        resnet18_wd2: 5804296,
        resnet18_w3d4: 8476056,
        resnet18: 11689512,
        resnet26: 17960232,
        resnetbc26b: 15995176,
        resnet34: 21797672,
        resnetbc38b: 21925416,
        resnet50: 25557032,
        resnet50b: 25557032,
        resnet101: 44549160,
        resnet101b: 44549160,
        resnet152: 60192808,
        resnet152b: 60192808,
        resnet200: 64673832,
        resnet200b: 64673832,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 4
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 27,599 | 32.948339 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/simpleposemobile_coco.py | """
SimplePose(Mobile) for COCO Keypoint, implemented in TensorFlow.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco',
'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco',
'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco']
import os
import tensorflow as tf
from .common import conv1x1, DucBlock, HeatmapMaxDetBlock, SimpleSequential, is_channels_first
from .resnet import resnet18, resnet50b
from .mobilenet import mobilenet_w1
from .mobilenetv2 import mobilenetv2b_w1
from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1
class SimplePoseMobile(tf.keras.Model):
    """
    SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,'
    https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    decoder_init_block_channels : int
        Number of output channels for the initial unit of the decoder.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 decoder_init_block_channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 data_format="channels_last",
                 **kwargs):
        super(SimplePoseMobile, self).__init__(**kwargs)
        # Only RGB input is supported.
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.data_format = data_format

        self.backbone = backbone
        # Force a stable layer name -- presumably required so that saved weight
        # files map onto the backbone consistently; TODO confirm.
        self.backbone._name = "backbone"

        # Decoder: 1x1 projection, then one DUC (x2 upsampling) unit per entry
        # in `channels`, then a 1x1 head producing one heatmap per keypoint.
        self.decoder = SimpleSequential(name="decoder")
        in_channels = backbone_out_channels
        self.decoder.add(conv1x1(
            in_channels=in_channels,
            out_channels=decoder_init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = decoder_init_block_channels
        for i, out_channels in enumerate(channels):
            self.decoder.add(DucBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                scale_factor=2,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        self.decoder.add(conv1x1(
            in_channels=in_channels,
            out_channels=keypoints,
            data_format=data_format,
            name="final_block"))
        self.heatmap_max_det = HeatmapMaxDetBlock(
            data_format=data_format,
            name="heatmap_max_det")

    def call(self, x, training=None):
        x = self.backbone(x, training=training)
        heatmap = self.decoder(x, training=training)
        # In graph (non-eager) mode the raw heatmap is always returned;
        # keypoint extraction only runs eagerly.
        if self.return_heatmap or not tf.executing_eagerly():
            return heatmap
        else:
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_simpleposemobile(backbone,
                         backbone_out_channels,
                         keypoints,
                         model_name=None,
                         data_format="channels_last",
                         pretrained=False,
                         root=os.path.join("~", ".tensorflow", "models"),
                         **kwargs):
    """
    Create SimplePose(Mobile) model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed decoder configuration shared by all SimplePose(Mobile) variants.
    channels = [128, 64, 32]
    decoder_init_block_channels = 256

    net = SimplePoseMobile(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=channels,
        decoder_init_block_channels=decoder_init_block_channels,
        keypoints=keypoints,
        data_format=data_format,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build the (subclassed) model with a concrete input shape before
        # loading weights -- variables do not exist until the model is built.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose(Mobile) model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = resnet18(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=512,
        keypoints=keypoints,
        model_name="simplepose_mobile_resnet18_coco",
        data_format=data_format,
        **kwargs)
def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose(Mobile) model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = resnet50b(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="simplepose_mobile_resnet50b_coco",
        data_format=data_format,
        **kwargs)
def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    SimplePose(Mobile) model on the base of 1.0 MobileNet-224 for COCO Keypoint from 'Simple Baselines for Human Pose
    Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = mobilenet_w1(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=1024,
        keypoints=keypoints,
        model_name="simplepose_mobile_mobilenet_w1_coco",
        data_format=data_format,
        **kwargs)
def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last",
                                           **kwargs):
    """
    SimplePose(Mobile) model on the base of 1.0 MobileNetV2b-224 for COCO Keypoint from 'Simple Baselines for Human
    Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = mobilenetv2b_w1(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=1280,
        keypoints=keypoints,
        model_name="simplepose_mobile_mobilenetv2b_w1_coco",
        data_format=data_format,
        **kwargs)
def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last",
                                                **kwargs):
    """
    SimplePose(Mobile) model on the base of MobileNetV3 Small 224/1.0 for COCO Keypoint from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = mobilenetv3_small_w1(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=576,
        keypoints=keypoints,
        model_name="simplepose_mobile_mobilenetv3_small_w1_coco",
        data_format=data_format,
        **kwargs)
def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last",
                                                **kwargs):
    """
    SimplePose(Mobile) model on the base of MobileNetV3 Large 224/1.0 for COCO Keypoint from 'Simple Baselines for
    Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    encoder = mobilenetv3_large_w1(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the classifier-side final stage so spatial feature maps are kept.
    del encoder.children[-1]
    return get_simpleposemobile(
        backbone=encoder,
        backbone_out_channels=960,
        keypoints=keypoints,
        model_name="simplepose_mobile_mobilenetv3_large_w1_coco",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test every SimplePose(Mobile) variant: output shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (256, 192)
    keypoints = 17
    pretrained_backbone = False
    return_heatmap = False
    pretrained = False

    # Expected trainable-parameter count for each model constructor.
    expected_weights = {
        simplepose_mobile_resnet18_coco: 12858208,
        simplepose_mobile_resnet50b_coco: 25582944,
        simplepose_mobile_mobilenet_w1_coco: 5019744,
        simplepose_mobile_mobilenetv2b_w1_coco: 4102176,
        simplepose_mobile_mobilenetv3_small_w1_coco: 2625088,
        simplepose_mobile_mobilenetv3_large_w1_coco: 4768336,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained_backbone=pretrained_backbone, keypoints=keypoints, pretrained=pretrained,
                    in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)

        batch = 14
        shape = ((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else
                 (batch, in_size[0], in_size[1], 3))
        x = tf.random.normal(shape)
        y = net(x)
        assert (y.shape[0] == batch)
        if return_heatmap:
            # Heatmap output: one channel per keypoint at 1/4 input resolution.
            if is_channels_first(data_format):
                assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and
                        (y.shape[3] == x.shape[3] // 4))
            else:
                assert ((y.shape[3] == keypoints) and (y.shape[1] == x.shape[1] // 4) and
                        (y.shape[2] == x.shape[2] // 4))
        else:
            assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 15,320 | 41.558333 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/cbamresnet.py | """
CBAM-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv7x7_block, SimpleSequential, flatten, is_channels_first, get_channel_axis
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(nn.Layer):
    """
    Multilayer perceptron block.

    Two dense layers with a ReLU in between; the hidden width is the input
    width squeezed by `reduction_ratio`.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 data_format="channels_last",
                 **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.data_format = data_format
        # Bottleneck width of the hidden layer.
        mid_channels = channels // reduction_ratio

        self.fc1 = nn.Dense(
            units=mid_channels,
            input_dim=channels,
            name="fc1")
        self.activ = nn.ReLU()
        self.fc2 = nn.Dense(
            units=channels,
            input_dim=mid_channels,
            name="fc2")

    def call(self, x, training=None):
        # Input arrives already flattened (from a global pooling layer in
        # ChannelGate), so the explicit flatten is unnecessary here.
        # x = flatten(x, self.data_format)
        x = self.fc1(x)
        x = self.activ(x)
        x = self.fc2(x)
        return x
class ChannelGate(nn.Layer):
    """
    CBAM channel gate block.

    Computes a per-channel attention vector from average- and max-pooled
    descriptors passed through a shared MLP, then rescales the input by it.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelGate, self).__init__(**kwargs)
        self.data_format = data_format
        self.avg_pool = nn.GlobalAvgPool2D(
            data_format=data_format,
            name="avg_pool")
        self.max_pool = nn.GlobalMaxPool2D(
            data_format=data_format,
            name="max_pool")
        # The same MLP is shared by both pooled descriptors.
        self.mlp = MLP(
            channels=channels,
            reduction_ratio=reduction_ratio,
            data_format=data_format,
            name="mlp")
        self.sigmoid = tf.nn.sigmoid

    def call(self, x, training=None):
        att1 = self.avg_pool(x)
        att1 = self.mlp(att1)
        att2 = self.max_pool(x)
        att2 = self.mlp(att2)
        # Fuse the two descriptors by summation, then squash to (0, 1).
        att = att1 + att2
        att = self.sigmoid(att)
        # Re-insert the spatial axes and broadcast the gate over them.
        if is_channels_first(self.data_format):
            att = tf.broadcast_to(tf.expand_dims(tf.expand_dims(att, 2), 3), shape=x.shape)
        else:
            att = tf.broadcast_to(tf.expand_dims(tf.expand_dims(att, 1), 2), shape=x.shape)
        x = x * att
        return x
class SpatialGate(nn.Layer):
    """
    CBAM spatial gate block.

    Builds a 2-channel map from channel-wise max and mean, convolves it down
    to a single-channel spatial attention map, and rescales the input by it.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(SpatialGate, self).__init__(**kwargs)
        self.data_format = data_format
        self.conv = conv7x7_block(
            in_channels=2,
            out_channels=1,
            activation=None,
            data_format=data_format,
            name="conv")
        self.sigmoid = tf.nn.sigmoid

    def call(self, x, training=None):
        axis = get_channel_axis(self.data_format)
        # Per-position max and mean over the channel axis (keepdims so the
        # two maps can be concatenated along that axis).
        att1 = tf.math.reduce_max(x, axis=axis, keepdims=True)
        att2 = tf.math.reduce_mean(x, axis=axis, keepdims=True)
        att = tf.concat([att1, att2], axis=axis)
        att = self.conv(att, training=training)
        att = tf.broadcast_to(self.sigmoid(att), shape=x.shape)
        x = x * att
        return x
class CbamBlock(nn.Layer):
    """
    CBAM attention block for CBAM-ResNet.

    Applies the channel gate followed by the spatial gate, sequentially.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 data_format="channels_last",
                 **kwargs):
        super(CbamBlock, self).__init__(**kwargs)
        self.ch_gate = ChannelGate(
            channels=channels,
            reduction_ratio=reduction_ratio,
            data_format=data_format,
            name="ch_gate")
        self.sp_gate = SpatialGate(
            data_format=data_format,
            name="sp_gate")

    def call(self, x, training=None):
        # Channel attention first, then spatial attention.
        x = self.ch_gate(x, training=training)
        x = self.sp_gate(x, training=training)
        return x
class CbamResUnit(nn.Layer):
    """
    CBAM-ResNet unit.

    A standard residual unit whose body output is modulated by a CBAM
    attention block before the identity addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 data_format="channels_last",
                 **kwargs):
        super(CbamResUnit, self).__init__(**kwargs)
        # A 1x1 projection shortcut is needed whenever the identity branch
        # shape would not match the body output.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=False,
                data_format=data_format,
                name="body")
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.cbam = CbamBlock(
            channels=out_channels,
            data_format=data_format,
            name="cbam")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        # Attention is applied to the body output only, not the shortcut.
        x = self.cbam(x, training=training)
        x = x + identity
        x = self.activ(x)
        return x
class CbamResNet(tf.keras.Model):
    """
    CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(CbamResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # Stem + stages of CBAM residual units + final average pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(CbamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classifier head. Named `output1` -- presumably because `output` is a
        # reserved attribute on tf.keras.Model; TODO confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create CBAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Map the nominal depth to the per-stage unit counts.
    if blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    else:
        raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))

    init_block_channels = 64

    # Shallow variants use basic blocks; deep ones use bottleneck blocks
    # with 4x wider stage outputs.
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = CbamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a concrete input shape before loading weights.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def cbam_resnet18(**kwargs):
    """
    CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=18, model_name="cbam_resnet18")
    return get_resnet(**cfg, **kwargs)
def cbam_resnet34(**kwargs):
    """
    CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=34, model_name="cbam_resnet34")
    return get_resnet(**cfg, **kwargs)
def cbam_resnet50(**kwargs):
    """
    CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=50, model_name="cbam_resnet50")
    return get_resnet(**cfg, **kwargs)
def cbam_resnet101(**kwargs):
    """
    CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=101, model_name="cbam_resnet101")
    return get_resnet(**cfg, **kwargs)
def cbam_resnet152(**kwargs):
    """
    CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    cfg = dict(blocks=152, model_name="cbam_resnet152")
    return get_resnet(**cfg, **kwargs)
def _test():
    """Smoke-test every CBAM-ResNet variant: forward pass shape and parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter count for each model constructor.
    expected_weights = {
        cbam_resnet18: 11779392,
        cbam_resnet34: 21960468,
        cbam_resnet50: 28089624,
        cbam_resnet101: 49330172,
        cbam_resnet152: 66826848,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 15,596 | 30.830612 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/diracnetv2.py | """
DiracNetV2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
"""
__all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, MaxPool2d, SimpleSequential, flatten, is_channels_first
class DiracConv(nn.Layer):
    """
    DiracNetV2 specific convolution block with pre-activation: a ReLU
    followed by a biased convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 data_format="channels_last",
                 **kwargs):
        super(DiracConv, self).__init__(**kwargs)
        self.activ = nn.ReLU()
        conv_params = dict(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.conv = Conv2d(**conv_params)

    def call(self, x, training=None):
        # Pre-activation ordering: activate first, then convolve.
        return self.conv(self.activ(x))
def dirac_conv3x3(in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    3x3 version of the DiracNetV2 specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Fix the kernel geometry (3x3, stride 1, padding 1) and forward the rest.
    kwargs.update(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=1,
        padding=1,
        data_format=data_format)
    return DiracConv(**kwargs)
class DiracInitBlock(nn.Layer):
    """
    DiracNetV2 specific initial block: a strided 7x7 biased convolution
    followed by an overlapping 3x3 max-pooling (each downsamples by 2).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
        super(DiracInitBlock, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")

    def call(self, x, training=None):
        return self.pool(self.conv(x))
class DiracNetV2(tf.keras.Model):
    """
    DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
    https://arxiv.org/abs/1706.00388.

    A plain (skip-connection-free) stack: an initial 7x7 conv + max-pool
    block, then stages of pre-activated 3x3 Dirac convolutions separated by
    2x2 max-pooling, and a dense classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DiracNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(DiracInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # Each stage is a chain of pre-activated 3x3 Dirac convolutions.
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(dirac_conv3x3(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            # Downsample between stages; the last stage is not followed by pooling.
            if i != len(channels) - 1:
                stage.add(MaxPool2d(
                    pool_size=2,
                    strides=2,
                    padding=0,
                    data_format=data_format,
                    name="pool{}".format(i + 1)))
            self.features.add(stage)
        # Final activation + fixed 7x7 average pooling (matches the 224x224 default input).
        self.features.add(nn.ReLU(name="final_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): named 'output1' presumably to avoid clashing with the
        # reserved `output` attribute of tf.keras.Model -- confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Collapse the pooled feature map to a vector before the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_diracnetv2(blocks,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create DiracNetV2 model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    depth_to_layers = {
        18: [4, 4, 4, 4],
        34: [6, 8, 12, 6],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = DiracNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch dimension so the weights exist
        # before they are loaded from the checkpoint file.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def diracnet18v2(**kwargs):
    """
    DiracNetV2 model of depth 18 from 'DiracNets: Training Very Deep Neural
    Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(
        blocks=18,
        model_name="diracnet18v2",
        **kwargs)
def diracnet34v2(**kwargs):
    """
    DiracNetV2 model of depth 34 from 'DiracNets: Training Very Deep Neural
    Networks Without Skip-Connections,' https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(
        blocks=34,
        model_name="diracnet34v2",
        **kwargs)
def _test():
    """Smoke-test the DiracNetV2 constructors: output shape and weight count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    # Reference trainable-parameter counts for each constructor.
    expected_weight_counts = {
        diracnet18v2: 11511784,
        diracnet34v2: 21616232,
    }
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        input_shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        x = tf.random.normal(input_shape)
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch, 1000)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 9,781 | 30.152866 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/sepreresnet_cifar.py | """
SE-PreResNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn',
'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn',
'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn',
'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn',
'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn',
'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn',
'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn',
'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first
from .sepreresnet import SEPreResUnit
class CIFARSEPreResNet(tf.keras.Model):
    """
    SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARSEPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # CIFAR stem: a single 3x3 convolution block.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage after the first downsamples by 2.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SEPreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 8x8 average pooling collapses the final feature map of a 32x32 input.
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): named 'output1' presumably to avoid clashing with the
        # reserved `output` attribute of tf.keras.Model -- confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Collapse the pooled feature map to a vector before the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_sepreresnet_cifar(classes,
                          blocks,
                          bottleneck,
                          model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".tensorflow", "models"),
                          **kwargs):
    """
    Create SE-PreResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes (10 or 100).
    blocks : int
        Number of blocks (total network depth).
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `classes` or `blocks` is unsupported, or `model_name` is missing
        while `pretrained` is requested.
    """
    # Validate with explicit exceptions instead of `assert`, which is
    # silently stripped when Python runs with the -O flag.
    if classes not in (10, 100):
        raise ValueError("Unsupported number of classes: {}".format(classes))
    if bottleneck:
        # Bottleneck units hold 3 convolutions each, spread over 3 stages
        # (plus 2 layers for the stem and the classifier).
        if (blocks - 2) % 9 != 0:
            raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
        layers = [(blocks - 2) // 9] * 3
    else:
        # Simple units hold 2 convolutions each, spread over 3 stages.
        if (blocks - 2) % 6 != 0:
            raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
        layers = [(blocks - 2) // 6] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units output 4x the base width of their stage.
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = CIFARSEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch dimension so the weights exist
        # before they are loaded from the checkpoint file.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def sepreresnet20_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="sepreresnet20_cifar10",
        **kwargs)
def sepreresnet20_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="sepreresnet20_cifar100",
        **kwargs)
def sepreresnet20_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="sepreresnet20_svhn",
        **kwargs)
def sepreresnet56_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="sepreresnet56_cifar10",
        **kwargs)
def sepreresnet56_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="sepreresnet56_cifar100",
        **kwargs)
def sepreresnet56_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="sepreresnet56_svhn",
        **kwargs)
def sepreresnet110_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="sepreresnet110_cifar10",
        **kwargs)
def sepreresnet110_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="sepreresnet110_cifar100",
        **kwargs)
def sepreresnet110_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="sepreresnet110_svhn",
        **kwargs)
def sepreresnet164bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="sepreresnet164bn_cifar10",
        **kwargs)
def sepreresnet164bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="sepreresnet164bn_cifar100",
        **kwargs)
def sepreresnet164bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="sepreresnet164bn_svhn",
        **kwargs)
def sepreresnet272bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="sepreresnet272bn_cifar10",
        **kwargs)
def sepreresnet272bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="sepreresnet272bn_cifar100",
        **kwargs)
def sepreresnet272bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="sepreresnet272bn_svhn",
        **kwargs)
def sepreresnet542bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="sepreresnet542bn_cifar10",
        **kwargs)
def sepreresnet542bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="sepreresnet542bn_cifar100",
        **kwargs)
def sepreresnet542bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="sepreresnet542bn_svhn",
        **kwargs)
def sepreresnet1001_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="sepreresnet1001_cifar10",
        **kwargs)
def sepreresnet1001_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="sepreresnet1001_cifar100",
        **kwargs)
def sepreresnet1001_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="sepreresnet1001_svhn",
        **kwargs)
def sepreresnet1202_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_cifar10",
        **kwargs)
def sepreresnet1202_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_cifar100",
        **kwargs)
def sepreresnet1202_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_sepreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="sepreresnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test the SE-PreResNet CIFAR constructors: output shape and weight count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    # (constructor, number of classes, reference trainable-parameter count)
    model_specs = [
        (sepreresnet20_cifar10, 10, 274559),
        (sepreresnet20_cifar100, 100, 280409),
        (sepreresnet20_svhn, 10, 274559),
        (sepreresnet56_cifar10, 10, 862601),
        (sepreresnet56_cifar100, 100, 868451),
        (sepreresnet56_svhn, 10, 862601),
        (sepreresnet110_cifar10, 10, 1744664),
        (sepreresnet110_cifar100, 100, 1750514),
        (sepreresnet110_svhn, 10, 1744664),
        (sepreresnet164bn_cifar10, 10, 1904882),
        (sepreresnet164bn_cifar100, 100, 1928012),
        (sepreresnet164bn_svhn, 10, 1904882),
        (sepreresnet272bn_cifar10, 10, 3152450),
        (sepreresnet272bn_cifar100, 100, 3175580),
        (sepreresnet272bn_svhn, 10, 3152450),
        (sepreresnet542bn_cifar10, 10, 6271370),
        (sepreresnet542bn_cifar100, 100, 6294500),
        (sepreresnet542bn_svhn, 10, 6271370),
        (sepreresnet1001_cifar10, 10, 11573534),
        (sepreresnet1001_cifar100, 100, 11596664),
        (sepreresnet1001_svhn, 10, 11573534),
        (sepreresnet1202_cifar10, 10, 19581938),
        (sepreresnet1202_cifar100, 100, 19587788),
        (sepreresnet1202_svhn, 10, 19581938),
    ]
    for model, classes, expected_count in model_specs:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        input_shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        x = tf.random.normal(input_shape)
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch, classes)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 24,762 | 37.511664 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/danet.py | """
DANet for image segmentation, implemented in TensorFlow.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine.input_spec import InputSpec
from .common import conv1x1, conv3x3_block, is_channels_first, interpolate_im, get_im_size
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(nn.Layer):
    """
    Simple scale block: multiplies its input by a single learnable scalar.

    Parameters:
    ----------
    alpha_initializer : str, default 'zeros'
        Initializer function for the weights.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 alpha_initializer="zeros",
                 data_format="channels_last",
                 **kwargs):
        super(ScaleBlock, self).__init__(**kwargs)
        self.data_format = data_format
        self.alpha_initializer = initializers.get(alpha_initializer)
    def build(self, input_shape):
        # One learnable scalar; with the default 'zeros' initializer the block
        # initially outputs 0 * x, so the attention branches that use it start
        # out contributing nothing to their residual sum.
        self.alpha = self.add_weight(
            shape=(1,),
            name="alpha",
            initializer=self.alpha_initializer,
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=True)
        # Pin every non-channel dimension of future inputs to the build-time
        # shape via InputSpec (rank is fixed too).
        channel_axis = (1 if is_channels_first(self.data_format) else len(input_shape) - 1)
        axes = {}
        for i in range(1, len(input_shape)):
            if i != channel_axis:
                axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True
    def call(self, x, training=None):
        return self.alpha * x
    def get_config(self):
        # Serialize the constructor arguments so the layer can be re-created.
        config = {
            "alpha_initializer": initializers.serialize(self.alpha_initializer),
            "data_format": self.data_format,
        }
        base_config = super(ScaleBlock, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def compute_output_shape(self, input_shape):
        # Element-wise scaling preserves the input shape.
        return input_shape
class PosAttBlock(nn.Layer):
    """
    Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    It captures long-range spatial contextual information.

    A (H*W x H*W) spatial self-attention map is computed from 1x1 query/key
    projections, used to re-weight the 1x1 value projection, and the result
    is added back to the input through a learned scalar scale.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 8
        Squeeze reduction value (for the query/key projections).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 reduction=8,
                 data_format="channels_last",
                 **kwargs):
        super(PosAttBlock, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = channels // reduction
        self.query_conv = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            use_bias=True,
            data_format=data_format,
            name="query_conv")
        self.key_conv = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            use_bias=True,
            data_format=data_format,
            name="key_conv")
        self.value_conv = conv1x1(
            in_channels=channels,
            out_channels=channels,
            use_bias=True,
            data_format=data_format,
            name="value_conv")
        self.scale = ScaleBlock(
            data_format=data_format,
            name="scale")
        self.softmax = nn.Softmax(axis=-1)
    def call(self, x, training=None):
        proj_query = self.query_conv(x)
        proj_key = self.key_conv(x)
        proj_value = self.value_conv(x)
        # Normalize to channels-first so the flattening below is uniform.
        if not is_channels_first(self.data_format):
            proj_query = tf.transpose(proj_query, perm=(0, 3, 1, 2))
            proj_key = tf.transpose(proj_key, perm=(0, 3, 1, 2))
            proj_value = tf.transpose(proj_value, perm=(0, 3, 1, 2))
        batch, channels, height, width = proj_query.shape
        # Flatten the spatial dimensions: (batch, C', H*W).
        proj_query = tf.reshape(proj_query, shape=(batch, -1, height * width))
        proj_key = tf.reshape(proj_key, shape=(batch, -1, height * width))
        proj_value = tf.reshape(proj_value, shape=(batch, -1, height * width))
        # energy[b, i, j]: affinity between spatial positions i and j, softmaxed over j.
        energy = tf.keras.backend.batch_dot(tf.transpose(proj_query, perm=(0, 2, 1)), proj_key)
        w = self.softmax(energy)
        # Aggregate the values over all positions with the attention weights.
        y = tf.keras.backend.batch_dot(proj_value, tf.transpose(w, perm=(0, 2, 1)))
        y = tf.reshape(y, shape=(batch, -1, height, width))
        if not is_channels_first(self.data_format):
            y = tf.transpose(y, perm=(0, 2, 3, 1))
        # Residual connection through a learned scalar (ScaleBlock).
        y = self.scale(y, training=training) + x
        return y
class ChaAttBlock(nn.Layer):
    """
    Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    It explicitly models interdependencies between channels.
    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(ChaAttBlock, self).__init__(**kwargs)
        self.data_format = data_format
        # Learnable scalar applied to the attention output before the residual
        # addition.
        self.scale = ScaleBlock(
            data_format=data_format,
            name="scale")
        self.softmax = nn.Softmax(axis=-1)
    def call(self, x, training=None):
        # Query, key and value are all the raw input: channel attention uses no
        # projection convolutions.
        proj_query = x
        proj_key = x
        proj_value = x
        # Normalize to NCHW so the flattening below is layout-independent.
        if not is_channels_first(self.data_format):
            proj_query = tf.transpose(proj_query, perm=(0, 3, 1, 2))
            proj_key = tf.transpose(proj_key, perm=(0, 3, 1, 2))
            proj_value = tf.transpose(proj_value, perm=(0, 3, 1, 2))
        batch, channels, height, width = proj_query.shape
        proj_query = tf.reshape(proj_query, shape=(batch, -1, height * width))
        proj_key = tf.reshape(proj_key, shape=(batch, -1, height * width))
        proj_value = tf.reshape(proj_value, shape=(batch, -1, height * width))
        # energy[b, i, j]: affinity between channels i and j, shape (N, C, C).
        energy = tf.keras.backend.batch_dot(proj_query, tf.transpose(proj_key, perm=(0, 2, 1)))
        # Note the inverted sign: rowmax - energy, not the usual energy - rowmax
        # stability shift. Presumably mirrors the reference DANet code — confirm.
        energy_new = tf.broadcast_to(tf.math.reduce_max(energy, axis=-1, keepdims=True), shape=energy.shape) - energy
        w = self.softmax(energy_new)
        y = tf.keras.backend.batch_dot(w, proj_value)
        y = tf.reshape(y, shape=(batch, -1, height, width))
        if not is_channels_first(self.data_format):
            y = tf.transpose(y, perm=(0, 2, 3, 1))
        # Scaled residual connection.
        y = self.scale(y, training=training) + x
        return y
class DANetHeadBranch(nn.Layer):
    """
    DANet head branch: 3x3 conv -> (position|channel) attention -> 3x3 conv,
    plus a 1x1 scoring conv with dropout.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pose_att : bool, default True
        Whether to use position attention instead of channel one.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pose_att=True,
                 data_format="channels_last",
                 **kwargs):
        super(DANetHeadBranch, self).__init__(**kwargs)
        # Attention operates at a quarter of the backbone channel width.
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        if pose_att:
            self.att = PosAttBlock(
                mid_channels,
                data_format=data_format,
                name="att")
        else:
            self.att = ChaAttBlock(
                data_format=data_format,
                name="att")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        # 1x1 projection to per-class scores.
        self.conv3 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv3")
        self.dropout = nn.Dropout(
            rate=dropout_rate,
            name="dropout")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.att(x, training=training)
        y = self.conv2(x, training=training)
        x = self.conv3(y)
        x = self.dropout(x, training=training)
        # x: per-class score map; y: mid-channel feature reused for fusion in
        # DANetHead.
        return x, y
class DANetHead(nn.Layer):
    """
    DANet head block: runs a position-attention branch and a channel-attention
    branch in parallel and fuses their features.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(DANetHead, self).__init__(**kwargs)
        # Must match the mid-channel width used inside DANetHeadBranch.
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        self.branch_pa = DANetHeadBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            pose_att=True,
            data_format=data_format,
            name="branch_pa")
        self.branch_ca = DANetHeadBranch(
            in_channels=in_channels,
            out_channels=out_channels,
            pose_att=False,
            data_format=data_format,
            name="branch_ca")
        self.conv = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv")
        self.dropout = nn.Dropout(
            rate=dropout_rate,
            name="dropout")
    def call(self, x, training=None):
        pa_x, pa_y = self.branch_pa(x, training=training)
        ca_x, ca_y = self.branch_ca(x, training=training)
        # Fuse the two branches by summing their mid-level features, then
        # project the sum to per-class scores.
        y = pa_y + ca_y
        x = self.conv(y)
        x = self.dropout(x, training=training)
        # Returns fused scores plus each branch's own scores (auxiliary heads).
        return x, pa_x, ca_x
class DANet(tf.keras.Model):
    """
    DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(DANet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # Input size must be divisible by 8 (backbone presumably emits
        # stride-8 features — confirm against the chosen backbone).
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.data_format = data_format
        self.backbone = backbone
        self.head = DANetHead(
            in_channels=backbone_out_channels,
            out_channels=classes,
            data_format=data_format,
            name="head")
    def call(self, x, training=None):
        # Target output size: fixed (self.in_size) or taken from the input.
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        x, _ = self.backbone(x, training=training)
        x, y, z = self.head(x, training=training)
        # Upsample score maps back to the input resolution.
        x = interpolate_im(x, out_size=in_size, data_format=self.data_format)
        if self.aux:
            y = interpolate_im(y, out_size=in_size, data_format=self.data_format)
            z = interpolate_im(z, out_size=in_size, data_format=self.data_format)
            return x, y, z
        else:
            return x
def get_danet(backbone,
              classes,
              aux=False,
              model_name=None,
              data_format="channels_last",
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Construct a DANet instance and optionally restore pretrained weights.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = DANet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        data_format=data_format,
        **kwargs)
    if not pretrained:
        return net
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    in_channels = kwargs.get("in_channels", 3)
    # Dummy input shape used only to build the variables before restoring.
    if net.data_format == "channels_first":
        input_shape = (1, in_channels) + net.in_size
    else:
        input_shape = (1,) + net.in_size + (in_channels,)
    net.build(input_shape=input_shape)
    weights_path = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=root)
    # Partial restore: names may not fully match when the backbone differs.
    net.load_weights(
        filepath=weights_path,
        by_name=True,
        skip_mismatch=True)
    return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last", **kwargs):
    """
    DANet with a ResNet(D)-50b backbone for Cityscapes, from 'Dual Attention Network for Scene
    Segmentation,' https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    feature_extractor = resnetd50b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the backbone's final child stage; it is not used for dense prediction.
    del feature_extractor.children[-1]
    return get_danet(
        backbone=feature_extractor,
        classes=classes,
        aux=aux,
        model_name="danet_resnetd50b_cityscapes",
        data_format=data_format,
        **kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, data_format="channels_last",
                                 **kwargs):
    """
    DANet with a ResNet(D)-101b backbone for Cityscapes, from 'Dual Attention Network for Scene
    Segmentation,' https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    feature_extractor = resnetd101b(
        pretrained=pretrained_backbone,
        ordinary_init=False,
        bends=(3,),
        data_format=data_format).features
    # Drop the backbone's final child stage; it is not used for dense prediction.
    del feature_extractor.children[-1]
    return get_danet(
        backbone=feature_extractor,
        classes=classes,
        aux=aux,
        model_name="danet_resnetd101b_cityscapes",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test the DANet variants and verify their trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (480, 480)
    aux = False
    pretrained = False
    expected_weights = {
        danet_resnetd50b_cityscapes: 47586427,
        danet_resnetd101b_cityscapes: 66578555,
    }
    for model, expected_count in expected_weights.items():
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, data_format=data_format)
        batch = 14
        classes = 19
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape[0] == x.shape[0])
        if is_channels_first(data_format):
            assert ((y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3]))
        else:
            assert ((y.shape[3] == classes) and (y.shape[1] == x.shape[1]) and (y.shape[2] == x.shape[2]))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
    _test()
| 18,175 | 34.156673 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mobilenetv2.py | """
MobileNetV2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1',
'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import ReLU6, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, SimpleSequential, flatten,\
is_channels_first
class LinearBottleneck(nn.Layer):
    """
    So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    expansion : bool
        Whether do expansion of channels.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 expansion,
                 remove_exp_conv,
                 data_format="channels_last",
                 **kwargs):
        super(LinearBottleneck, self).__init__(**kwargs)
        # Identity shortcut is valid only when the unit keeps both the channel
        # count and the spatial resolution.
        self.residual = (in_channels == out_channels) and (strides == 1)
        # Channel expansion uses a fixed factor of 6.
        mid_channels = in_channels * 6 if expansion else in_channels
        # The 1x1 expansion conv may be dropped only for non-expanding units.
        self.use_exp_conv = (expansion or (not remove_exp_conv))
        if self.use_exp_conv:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=ReLU6(),
                data_format=data_format,
                name="conv1")
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=ReLU6(),
            data_format=data_format,
            name="conv2")
        # Linear (no activation) projection back down — the 'linear bottleneck'.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        if self.residual:
            x = x + identity
        return x
class MobileNetV2(tf.keras.Model):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 remove_exp_conv,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            activation=ReLU6(),
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                # The very first unit of the network uses no channel expansion.
                expansion = (i != 0) or (j != 0)
                stage.add(LinearBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    expansion=expansion,
                    remove_exp_conv=remove_exp_conv,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            activation=ReLU6(),
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # 7x7 average pooling matches the 224x224 default input (stride-32 net).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # A bias-free 1x1 convolution acts as the classifier; flattened in call().
        self.output1 = conv1x1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=False,
            data_format=data_format,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_mobilenetv2(width_scale,
                    remove_exp_conv=False,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV2 model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    remove_exp_conv : bool, default False
        Whether to remove expansion convolution.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 2, 3, 4, 3, 3, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    from functools import reduce
    # Group the flat per-layer config into stages: each (channels, count,
    # downsample) triple starts a new stage when downsample != 0, otherwise its
    # units are appended to the current (last) stage.
    channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
                      zip(channels_per_layers, layers, downsample), [[]])
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        # The final 1x1 block is widened only when scaling up, never narrowed.
        if width_scale > 1.0:
            final_block_channels = int(final_block_channels * width_scale)
    net = MobileNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        remove_exp_conv=remove_exp_conv,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Dummy input shape (NCHW vs NHWC) used only to build the variables.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def mobilenetv2_w1(**kwargs):
    """
    MobileNetV2-224 with width multiplier 1.0, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w1", width_scale=1.0, **kwargs)
def mobilenetv2_w3d4(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.75, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w3d4", width_scale=0.75, **kwargs)
def mobilenetv2_wd2(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.5, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd2", width_scale=0.5, **kwargs)
def mobilenetv2_wd4(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.25, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd4", width_scale=0.25, **kwargs)
def mobilenetv2b_w1(**kwargs):
    """
    MobileNetV2b-224 (no expansion conv in non-expanding units) with width multiplier 1.0, from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2b_w1", width_scale=1.0, remove_exp_conv=True, **kwargs)
def mobilenetv2b_w3d4(**kwargs):
    """
    MobileNetV2b-224 (no expansion conv in non-expanding units) with width multiplier 0.75, from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2b_w3d4", width_scale=0.75, remove_exp_conv=True, **kwargs)
def mobilenetv2b_wd2(**kwargs):
    """
    MobileNetV2b-224 (no expansion conv in non-expanding units) with width multiplier 0.5, from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2b_wd2", width_scale=0.5, remove_exp_conv=True, **kwargs)
def mobilenetv2b_wd4(**kwargs):
    """
    MobileNetV2b-224 (no expansion conv in non-expanding units) with width multiplier 0.25, from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(model_name="mobilenetv2b_wd4", width_scale=0.25, remove_exp_conv=True, **kwargs)
def _test():
    """Smoke-test every MobileNetV2 variant and verify its trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    expected_weights = {
        mobilenetv2_w1: 3504960,
        mobilenetv2_w3d4: 2627592,
        mobilenetv2_wd2: 1964736,
        mobilenetv2_wd4: 1516392,
        mobilenetv2b_w1: 3503872,
        mobilenetv2b_w3d4: 2626968,
        mobilenetv2b_wd2: 1964448,
        mobilenetv2b_wd4: 1516312,
    }
    for model, expected_count in expected_weights.items():
        net = model(pretrained=pretrained)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
    _test()
| 13,837 | 34.121827 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/squeezenet.py | """
SqueezeNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import get_channel_axis, Conv2d, MaxPool2d, SimpleSequential, flatten
class FireConv(nn.Layer):
    """
    Convolution followed by ReLU, the building block of SqueezeNet 'Fire' units.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding,
                 data_format="channels_last",
                 **kwargs):
        super(FireConv, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            data_format=data_format,
            name="conv")
        self.activ = nn.ReLU()
    def call(self, x):
        return self.activ(self.conv(x))
class FireUnit(nn.Layer):
    """
    SqueezeNet unit, so-called 'Fire' unit: a 1x1 squeeze convolution feeding
    parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    squeeze_channels : int
        Number of output channels for squeeze convolution blocks.
    expand1x1_channels : int
        Number of output channels for expand 1x1 convolution blocks.
    expand3x3_channels : int
        Number of output channels for expand 3x3 convolution blocks.
    residual : bool
        Whether use residual connection.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 squeeze_channels,
                 expand1x1_channels,
                 expand3x3_channels,
                 residual,
                 data_format="channels_last",
                 **kwargs):
        super(FireUnit, self).__init__(**kwargs)
        self.residual = residual
        self.data_format = data_format
        self.squeeze = FireConv(
            in_channels=in_channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            padding=0,
            data_format=data_format,
            name="squeeze")
        self.expand1x1 = FireConv(
            in_channels=squeeze_channels,
            out_channels=expand1x1_channels,
            kernel_size=1,
            padding=0,
            data_format=data_format,
            name="expand1x1")
        self.expand3x3 = FireConv(
            in_channels=squeeze_channels,
            out_channels=expand3x3_channels,
            kernel_size=3,
            padding=1,
            data_format=data_format,
            name="expand3x3")
    def call(self, x):
        # Identity shortcut assumes the concatenated expand output matches the
        # input channel count when `residual` is enabled.
        if self.residual:
            identity = x
        x = self.squeeze(x)
        y1 = self.expand1x1(x)
        y2 = self.expand3x3(x)
        # Concatenate the two expand branches along the channel axis.
        out = tf.concat([y1, y2], axis=get_channel_axis(self.data_format))
        if self.residual:
            out = out + identity
        return out
class SqueezeInitBlock(nn.Layer):
    """
    SqueezeNet stem: a stride-2 convolution followed by ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeInitBlock, self).__init__(**kwargs)
        # Stride 2 halves the spatial resolution right at the stem.
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=2,
            data_format=data_format,
            name="conv")
        self.activ = nn.ReLU()
    def call(self, x):
        return self.activ(self.conv(x))
class SqueezeNet(tf.keras.Model):
    """
    SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    residuals : bool
        Whether to use residual units.
    init_block_kernel_size : int or tuple/list of 2 int
        The dimensions of the convolution window for the initial unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(SqueezeInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            kernel_size=init_block_kernel_size,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            # Every stage opens with a ceil-mode 3x3/2 max pooling.
            stage.add(MaxPool2d(
                pool_size=3,
                strides=2,
                ceil_mode=True,
                data_format=data_format,
                name="pool{}".format(i + 1)))
            for j, out_channels in enumerate(channels_per_stage):
                # Fire-unit ratios: each expand branch produces half of the
                # unit's output channels; the squeeze layer one eighth.
                expand_channels = out_channels // 2
                squeeze_channels = out_channels // 8
                stage.add(FireUnit(
                    in_channels=in_channels,
                    squeeze_channels=squeeze_channels,
                    expand1x1_channels=expand_channels,
                    expand3x3_channels=expand_channels,
                    residual=((residuals is not None) and (residuals[i][j] == 1)),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.Dropout(
            rate=0.5,
            name="dropout"))
        # Convolutional classifier: 1x1 conv to `classes`, ReLU, then a 13x13
        # average pooling (matches the 224x224 default input); flattened in call().
        self.output1 = SimpleSequential(name="output1")
        self.output1.add(Conv2d(
            in_channels=in_channels,
            out_channels=classes,
            kernel_size=1,
            data_format=data_format,
            name="final_conv"))
        self.output1.add(nn.ReLU())
        self.output1.add(nn.AveragePooling2D(
            pool_size=13,
            strides=1,
            data_format=data_format,
            name="final_pool"))
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_squeezenet(version,
                   residual=False,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Construct a SqueezeNet instance and optionally restore pretrained weights.
    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('1.0' or '1.1').
    residual : bool, default False
        Whether to use residual connections.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-version configuration: (stage channels, residual flags, stem kernel,
    # stem channels).
    configs = {
        "1.0": ([[128, 128, 256], [256, 384, 384, 512], [512]],
                [[0, 1, 0], [1, 0, 1, 0], [1]],
                7,
                96),
        "1.1": ([[128, 128], [256, 256], [384, 384, 512, 512]],
                [[0, 1], [0, 1], [0, 1, 0, 1]],
                3,
                64),
    }
    if version not in configs:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    channels, residuals, init_block_kernel_size, init_block_channels = configs[version]
    if not residual:
        residuals = None
    net = SqueezeNet(
        channels=channels,
        residuals=residuals,
        init_block_kernel_size=init_block_kernel_size,
        init_block_channels=init_block_channels,
        **kwargs)
    if not pretrained:
        return net
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    in_channels = kwargs.get("in_channels", 3)
    # Dummy input shape used only to build the variables before restoring.
    if net.data_format == "channels_first":
        input_shape = (1, in_channels) + net.in_size
    else:
        input_shape = (1,) + net.in_size + (in_channels,)
    net.build(input_shape=input_shape)
    net.load_weights(
        filepath=get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root))
    return net
def squeezenet_v1_0(**kwargs):
    """
    Vanilla SqueezeNet v1.0 from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
    <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(model_name="squeezenet_v1_0", version="1.0", residual=False, **kwargs)
def squeezenet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
    <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(model_name="squeezenet_v1_1", version="1.1", residual=False, **kwargs)
def squeezeresnet_v1_0(**kwargs):
    """
    SqueezeNet v1.0 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_0", version="1.0", residual=True, **kwargs)
def squeezeresnet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(model_name="squeezeresnet_v1_1", version="1.1", residual=True, **kwargs)
def _test():
    """Smoke-test every SqueezeNet variant and verify its trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    pretrained = False
    expected_weights = {
        squeezenet_v1_0: 1248424,
        squeezenet_v1_1: 1235496,
        squeezeresnet_v1_0: 1248424,
        squeezeresnet_v1_1: 1235496,
    }
    for model, expected_count in expected_weights.items():
        net = model(pretrained=pretrained)
        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
    _test()
| 13,417 | 32.212871 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/vgg.py | """
VGG for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, MaxPool2d, SimpleSequential, flatten
class VGGDense(nn.Layer):
    """
    Fully-connected block of the VGG classifier head: a dense layer followed
    by ReLU activation and dropout (rate 0.5, active only in training mode).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(VGGDense, self).__init__(**kwargs)
        self.fc = nn.Dense(
            units=out_channels,
            input_dim=in_channels,
            name="fc")
        self.activ = nn.ReLU()
        self.dropout = nn.Dropout(
            rate=0.5,
            name="dropout")

    def call(self, x, training=None):
        out = self.activ(self.fc(x))
        return self.dropout(out, training=training)
class VGGOutputBlock(nn.Layer):
    """
    VGG classifier head: two dropout-regularized dense blocks of width 4096
    followed by the final class-logits dense layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super(VGGOutputBlock, self).__init__(**kwargs)
        mid_channels = 4096

        self.fc1 = VGGDense(
            in_channels=in_channels,
            out_channels=mid_channels,
            name="fc1")
        self.fc2 = VGGDense(
            in_channels=mid_channels,
            out_channels=mid_channels,
            name="fc2")
        self.fc3 = nn.Dense(
            units=classes,
            input_dim=mid_channels,
            name="fc3")

    def call(self, x, training=None):
        out = self.fc1(x, training=training)
        out = self.fc2(out, training=training)
        return self.fc3(out)
class VGG(tf.keras.Model):
    """
    VGG backbone ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556): stages of 3x3 conv blocks
    each closed by a 2x2 max-pool, then a three-layer dense classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 use_bias=True,
                 use_bn=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        for stage_idx, stage_channels in enumerate(channels, start=1):
            stage = SimpleSequential(name="stage{}".format(stage_idx))
            for unit_idx, out_channels in enumerate(stage_channels, start=1):
                stage.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    data_format=data_format,
                    name="unit{}".format(unit_idx)))
                in_channels = out_channels
            stage.add(MaxPool2d(
                pool_size=2,
                strides=2,
                padding=0,
                data_format=data_format,
                name="pool{}".format(stage_idx)))
            self.features.add(stage)

        # The classifier consumes the flattened final 7x7 feature map.
        self.output1 = VGGOutputBlock(
            in_channels=(in_channels * 7 * 7),
            classes=classes,
            name="output1")

    def call(self, x, training=None):
        features = self.features(x, training=training)
        return self.output1(flatten(features, self.data_format))
def get_vgg(blocks,
            use_bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create a VGG model with the requested depth.

    Parameters:
    ----------
    blocks : int
        Number of blocks (11, 13, 16 or 19).
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Units per stage for every supported depth.
    layers_by_blocks = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = VGG(
        channels=channels,
        use_bias=use_bias,
        use_bn=use_bn,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build the variables before restoring the checkpoint.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def vgg11(**kwargs):
    """
    Construct VGG-11 ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=11, model_name="vgg11", **kwargs)
def vgg13(**kwargs):
    """
    Construct VGG-13 ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=13, model_name="vgg13", **kwargs)
def vgg16(**kwargs):
    """
    Construct VGG-16 ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=16, model_name="vgg16", **kwargs)
def vgg19(**kwargs):
    """
    Construct VGG-19 ('Very Deep Convolutional Networks for Large-Scale Image
    Recognition,' https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=19, model_name="vgg19", **kwargs)
def bn_vgg11(**kwargs):
    """
    Construct VGG-11 with batch normalization and bias-free convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs)
def bn_vgg13(**kwargs):
    """
    Construct VGG-13 with batch normalization and bias-free convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs)
def bn_vgg16(**kwargs):
    """
    Construct VGG-16 with batch normalization and bias-free convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs)
def bn_vgg19(**kwargs):
    """
    Construct VGG-19 with batch normalization and bias-free convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs)
def bn_vgg11b(**kwargs):
    """
    Construct VGG-11 with batch normalization and biased convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs)
def bn_vgg13b(**kwargs):
    """
    Construct VGG-13 with batch normalization and biased convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs)
def bn_vgg16b(**kwargs):
    """
    Construct VGG-16 with batch normalization and biased convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs)
def bn_vgg19b(**kwargs):
    """
    Construct VGG-19 with batch normalization and biased convolutions
    ('Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    VGG
        The requested network.
    """
    return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs)
def _test():
    """Smoke-test every VGG variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False
    # Expected trainable-weight counts for each model builder.
    expected_weight_counts = {
        vgg11: 132863336,
        vgg13: 133047848,
        vgg16: 138357544,
        vgg19: 143667240,
        bn_vgg11: 132866088,
        bn_vgg13: 133050792,
        bn_vgg16: 138361768,
        bn_vgg19: 143672744,
        bn_vgg11b: 132868840,
        bn_vgg13b: 133053736,
        bn_vgg16b: 138365992,
        bn_vgg19b: 143678248,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch, 1000)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 14,207 | 31 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnet_cub.py | """
ResNet for CUB-200-2011, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .common import is_channels_first
from .resnet import get_resnet
def resnet10_cub(classes=200, **kwargs):
    """
    ResNet-10 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs)
def resnet12_cub(classes=200, **kwargs):
    """
    ResNet-12 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs)
def resnet14_cub(classes=200, **kwargs):
    """
    ResNet-14 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs)
def resnetbc14b_cub(classes=200, **kwargs):
    """
    ResNet-BC-14b (bottleneck compressed, stride on the 3x3 conv) for
    CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc14b_cub", **kwargs)
def resnet16_cub(classes=200, **kwargs):
    """
    ResNet-16 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs)
def resnet18_cub(classes=200, **kwargs):
    """
    ResNet-18 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs)
def resnet26_cub(classes=200, **kwargs):
    """
    ResNet-26 (plain, non-bottleneck blocks) for CUB-200-2011 ('Deep Residual
    Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs)
def resnetbc26b_cub(classes=200, **kwargs):
    """
    ResNet-BC-26b (bottleneck compressed, stride on the 3x3 conv) for
    CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc26b_cub", **kwargs)
def resnet34_cub(classes=200, **kwargs):
    """
    ResNet-34 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=34, model_name="resnet34_cub", **kwargs)
def resnetbc38b_cub(classes=200, **kwargs):
    """
    ResNet-BC-38b (bottleneck compressed, stride on the 3x3 conv) for
    CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False,
                      model_name="resnetbc38b_cub", **kwargs)
def resnet50_cub(classes=200, **kwargs):
    """
    ResNet-50 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=50, model_name="resnet50_cub", **kwargs)
def resnet50b_cub(classes=200, **kwargs):
    """
    ResNet-50b (stride on the second bottleneck convolution) for CUB-200-2011
    ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs)
def resnet101_cub(classes=200, **kwargs):
    """
    ResNet-101 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=101, model_name="resnet101_cub", **kwargs)
def resnet101b_cub(classes=200, **kwargs):
    """
    ResNet-101b (stride on the second bottleneck convolution) for CUB-200-2011
    ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs)
def resnet152_cub(classes=200, **kwargs):
    """
    ResNet-152 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=152, model_name="resnet152_cub", **kwargs)
def resnet152b_cub(classes=200, **kwargs):
    """
    ResNet-152b (stride on the second bottleneck convolution) for CUB-200-2011
    ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs)
def resnet200_cub(classes=200, **kwargs):
    """
    ResNet-200 for CUB-200-2011 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=200, model_name="resnet200_cub", **kwargs)
def resnet200b_cub(classes=200, **kwargs):
    """
    ResNet-200b (stride on the second bottleneck convolution) for CUB-200-2011
    ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental depth.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnet(classes=classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs)
def _test():
    """Smoke-test every CUB-200-2011 ResNet variant: output shape and parameter count."""
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    # Expected trainable-weight counts for each model builder.
    expected_weight_counts = {
        resnet10_cub: 5008392,
        resnet12_cub: 5082376,
        resnet14_cub: 5377800,
        resnetbc14b_cub: 8425736,
        resnet16_cub: 6558472,
        resnet18_cub: 11279112,
        resnet26_cub: 17549832,
        resnetbc26b_cub: 14355976,
        resnet34_cub: 21387272,
        resnetbc38b_cub: 20286216,
        resnet50_cub: 23917832,
        resnet50b_cub: 23917832,
        resnet101_cub: 42909960,
        resnet101b_cub: 42909960,
        resnet152_cub: 58553608,
        resnet152b_cub: 58553608,
        resnet200_cub: 63034632,
        resnet200b_cub: 63034632,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch, 200)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 14,084 | 35.489637 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/bagnet.py | """
BagNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
"""
__all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock, SimpleSequential, flatten, is_channels_first
class BagNetBottleneck(nn.Layer):
    """
    Residual-path bottleneck of a BagNet unit: 1x1 reduce -> kxk conv with
    valid (zero) padding -> 1x1 expand, where the expansion conv has no
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second convolution.
    strides : int or tuple/list of 2 int
        Strides of the second convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(BagNetBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = ConvBlock(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=0,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        out = self.conv1(x, training=training)
        out = self.conv2(out, training=training)
        return self.conv3(out, training=training)
class BagNetUnit(nn.Layer):
    """
    BagNet residual unit: bottleneck body plus identity shortcut. Because the
    body uses valid padding, the shortcut is cropped to the body's spatial
    size before the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second body convolution.
    strides : int or tuple/list of 2 int
        Strides of the second body convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(BagNetUnit, self).__init__(**kwargs)
        self.data_format = data_format
        # A projection shortcut is needed whenever channels or stride change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        self.body = BagNetBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_conv(x, training=training) if self.resize_identity else x
        out = self.body(x, training=training)
        # Crop the shortcut when the valid-padded body shrank the feature map.
        if out.shape[-2] != identity.shape[-2]:
            diff = identity.shape[-2] - out.shape[-2]
            if is_channels_first(self.data_format):
                identity = identity[:, :, :-diff, :-diff]
            else:
                identity = identity[:, :-diff, :-diff, :]
        out = out + identity
        return self.activ(out)
class BagNetInitBlock(nn.Layer):
    """
    BagNet stem: a plain 1x1 convolution followed by a valid-padded 3x3
    convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(BagNetInitBlock, self).__init__(**kwargs)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            padding=0,
            data_format=data_format,
            name="conv2")

    def call(self, x, training=None):
        out = self.conv1(x, training=training)
        return self.conv2(out, training=training)
class BagNet(tf.keras.Model):
    """
    BagNet model ('Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ).

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_pool_size : int
        Size of the pooling windows for final pool.
    normal_kernel_sizes : list of int
        Count of the first units with 3x3 convolution window size for each stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_pool_size,
                 normal_kernel_sizes,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(BagNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(BagNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        last_stage = len(channels) - 1
        for i, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the last.
                strides = 2 if j == 0 and i != last_stage else 1
                # Only the first normal_kernel_sizes[i] units use 3x3 windows.
                kernel_size = 3 if j < normal_kernel_sizes[i] else 1
                stage.add(BagNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=final_pool_size,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        features = self.features(x, training=training)
        return self.output1(flatten(features, self.data_format))
def get_bagnet(field,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create BagNet model with specific parameters.

    Parameters:
    ----------
    field : int
        Receptive-field size of the model (9, 17 or 33).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per receptive field: (3x3-unit counts per stage, final pooling window).
    configs = {
        9: ([1, 1, 0, 0], 27),
        17: ([1, 1, 1, 0], 26),
        33: ([1, 1, 1, 1], 24),
    }
    if field not in configs:
        raise ValueError("Unsupported BagNet with field: {}".format(field))
    normal_kernel_sizes, final_pool_size = configs[field]

    layers = [3, 4, 6, 3]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = BagNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_pool_size=final_pool_size,
        normal_kernel_sizes=normal_kernel_sizes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build the variables before restoring the checkpoint.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def bagnet9(**kwargs):
    """
    BagNet-9 (9x9 receptive field) from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Thin wrapper around ``get_bagnet`` fixing ``field=9`` and the weight-file name.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_bagnet(field=9, model_name="bagnet9", **kwargs)
def bagnet17(**kwargs):
    """
    BagNet-17 (17x17 receptive field) from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Thin wrapper around ``get_bagnet`` fixing ``field=17`` and the weight-file name.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_bagnet(field=17, model_name="bagnet17", **kwargs)
def bagnet33(**kwargs):
    """
    BagNet-33 (33x33 receptive field) from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Thin wrapper around ``get_bagnet`` fixing ``field=33`` and the weight-file name.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_bagnet(field=33, model_name="bagnet33", **kwargs)
def _test():
    """Smoke-test every BagNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    expected_weight_counts = {
        bagnet9: 15688744,
        bagnet17: 16213032,
        bagnet33: 18310184,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 12,719 | 31.868217 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/airnet.py | """
AirNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, MaxPool2d, SimpleSequential, flatten, is_channels_first
class AirBlock(nn.Layer):
    """
    AirNet attention block.
    Produces a sigmoid attention mask from the input: squeeze to
    ``out_channels // ratio`` channels, downsample 2x, apply a 3x3 conv,
    upsample 2x and project to ``out_channels`` before the sigmoid.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int, default 1
        Number of groups.
    ratio: int, default 2
        Air compression ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups=1,
                 ratio=2,
                 data_format="channels_last",
                 **kwargs):
        super(AirBlock, self).__init__(**kwargs)
        assert (out_channels % ratio == 0)
        mid_channels = out_channels // ratio
        # Channel squeeze before working at reduced resolution.
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="conv2")
        # No activation: the sigmoid below is the final non-linearity.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
        self.sigmoid = tf.nn.sigmoid
        # Bilinear 2x upsampling to undo the stride-2 pooling above.
        self.upsample = nn.UpSampling2D(
            size=(2, 2),
            data_format=data_format,
            interpolation="bilinear",
            name="upsample")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.pool(x)
        x = self.conv2(x, training=training)
        x = self.upsample(x)
        x = self.conv3(x, training=training)
        x = self.sigmoid(x)  # attention mask with values in (0, 1)
        return x
class AirBottleneck(nn.Layer):
    """
    AirNet bottleneck block for residual path in AirNet unit.
    A 1x1 -> 3x3 -> 1x1 bottleneck (mid width = out_channels // 4); when
    ``use_air_block`` holds, an AirBlock mask computed from the block input
    gates the 3x3 output element-wise.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    ratio: int
        Air compression ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 ratio,
                 data_format="channels_last",
                 **kwargs):
        super(AirBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # Attention only in stride-1 units of moderate width; downsampling
        # units and the widest stage (mid_channels >= 512) skip it.
        self.use_air_block = (strides == 1 and mid_channels < 512)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
        # No activation: the enclosing AirUnit applies ReLU after the residual add.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
        if self.use_air_block:
            self.air = AirBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                ratio=ratio,
                data_format=data_format,
                name="air")
    def call(self, x, training=None):
        if self.use_air_block:
            # Mask is computed from the block input, in parallel with conv1/conv2.
            att = self.air(x, training=training)
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_air_block:
            x = x * att
        x = self.conv3(x, training=training)
        return x
class AirUnit(nn.Layer):
    """
    AirNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    ratio: int
        Air compression ratio.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 ratio,
                 data_format="channels_last",
                 **kwargs):
        super(AirUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever the identity branch changes
        # shape (channel count or spatial stride).
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = AirBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            ratio=ratio,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = x + identity
        x = self.activ(x)  # post-addition ReLU (classic ResNet ordering)
        return x
class AirInitBlock(nn.Layer):
    """
    AirNet specific initial block.
    Stem of three 3x3 convs (the first with stride 2) followed by a stride-2
    max pool, i.e. a 4x spatial reduction overall.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(AirInitBlock, self).__init__(**kwargs)
        # First two convs run at half the output width.
        mid_channels = out_channels // 2
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv3")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        x = self.pool(x)
        return x
class AirNet(tf.keras.Model):
    """
    AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    ratio: int
        Air compression ratio.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 ratio,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(AirNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(AirInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample with the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(AirUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    ratio=ratio,
                    # Bug fix: data_format was not forwarded, so units silently
                    # defaulted to channels_last even for channels_first models.
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # 7x7 global average pooling assumes a 224x224 input (224 / 32 = 7).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_airnet(blocks,
               base_channels,
               ratio,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Build an AirNet model from a depth/width/ratio specification.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 and 101 are supported).
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    layers_per_depth = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_per_depth:
        raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks))
    layers = layers_per_depth[blocks]

    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Width doubles each stage; every unit in a stage shares one channel count.
    channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = AirNet(
        channels=channels,
        init_block_channels=init_block_channels,
        ratio=ratio,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a batch-1 dummy shape so weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def airnet50_1x64d_r2(**kwargs):
    """
    AirNet50-1x64d (r=2) from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Thin wrapper around ``get_airnet`` fixing depth 50, base width 64 and ratio 2.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_airnet(blocks=50, base_channels=64, ratio=2, model_name="airnet50_1x64d_r2", **kwargs)
def airnet50_1x64d_r16(**kwargs):
    """
    AirNet50-1x64d (r=16) from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Thin wrapper around ``get_airnet`` fixing depth 50, base width 64 and ratio 16.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_airnet(blocks=50, base_channels=64, ratio=16, model_name="airnet50_1x64d_r16", **kwargs)
def airnet101_1x64d_r2(**kwargs):
    """
    AirNet101-1x64d (r=2) from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Thin wrapper around ``get_airnet`` fixing depth 101, base width 64 and ratio 2.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_airnet(blocks=101, base_channels=64, ratio=2, model_name="airnet101_1x64d_r2", **kwargs)
def _test():
    """Smoke-test every AirNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    expected_weight_counts = {
        airnet50_1x64d_r2: 27425864,
        airnet50_1x64d_r16: 25714952,
        airnet101_1x64d_r2: 51727432,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 14,996 | 31.182403 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mnasnet.py | """
MnasNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\
SimpleSequential, flatten
class DwsExpSEResUnit(nn.Layer):
    """
    Depthwise separable expanded residual unit with SE-block. Here it used as MnasNet unit.
    Layout: optional 1x1 expansion (when exp_factor > 1) -> depthwise 3x3/5x5 ->
    optional SE gate -> linear 1x1 projection, with a skip connection when the
    unit preserves shape and ``use_skip`` is set.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution layer.
    use_kernel3 : bool, default True
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int, default 1
        Expansion factor for each unit.
    se_factor : int, default 0
        SE reduction factor for each unit (0 disables the SE block).
    use_skip : bool, default True
        Whether to use skip connection.
    activation : str, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 use_kernel3=True,
                 exp_factor=1,
                 se_factor=0,
                 use_skip=True,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(DwsExpSEResUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        # Residual add only when the unit is shape-preserving.
        self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
        self.use_exp_conv = exp_factor > 1
        self.use_se = se_factor > 0
        mid_channels = exp_factor * in_channels
        dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=activation,
                data_format=data_format,
                name="exp_conv")
        self.dw_conv = dwconv_block_fn(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=activation,
            data_format=data_format,
            name="dw_conv")
        if self.use_se:
            # Reduction is given relative to the expanded width
            # (mid_channels / (exp_factor * se_factor) == in_channels / se_factor).
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                round_mid=False,
                mid_activation=activation,
                data_format=data_format,
                name="se")
        # Linear bottleneck: no activation after the projection.
        self.pw_conv = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="pw_conv")
    def call(self, x, training=None):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x, training=training)
        x = self.dw_conv(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x, training=training)
        if self.residual:
            x = x + identity
        return x
class MnasInitBlock(nn.Layer):
    """
    MnasNet specific initial block.
    A stride-2 3x3 stem conv followed by one depthwise separable unit
    (exp_factor 1, i.e. no expansion conv).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip,
                 data_format="channels_last",
                 **kwargs):
        super(MnasInitBlock, self).__init__(**kwargs)
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = DwsExpSEResUnit(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_skip=use_skip,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class MnasFinalBlock(nn.Layer):
    """
    MnasNet specific final block.
    One expanded unit (exp_factor 6) followed by a 1x1 conv that widens the
    features for the classifier.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip,
                 data_format="channels_last",
                 **kwargs):
        super(MnasFinalBlock, self).__init__(**kwargs)
        self.conv1 = DwsExpSEResUnit(
            in_channels=in_channels,
            out_channels=mid_channels,
            exp_factor=6,
            use_skip=use_skip,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class MnasNet(tf.keras.Model):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 se_factors,
                 init_block_use_skip,
                 final_block_use_skip,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MnasNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(MnasInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels[1],
            mid_channels=init_block_channels[0],
            use_skip=init_block_use_skip,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels[1]
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Every stage downsamples with its first unit.
                strides = 2 if (j == 0) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                se_factor = se_factors[i][j]
                stage.add(DwsExpSEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    se_factor=se_factor,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(MnasFinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels[1],
            mid_channels=final_block_channels[0],
            use_skip=final_block_use_skip,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels[1]
        # 7x7 global average pooling assumes a 224x224 input (224 / 32 = 7).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_mnasnet(version,
                width_scale,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create MnasNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MnasNet ('b1', 'a1' or 'small').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if version == "b1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
        kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
        exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
        se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
        init_block_use_skip = False
        final_block_use_skip = False
    elif version == "a1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
        init_block_use_skip = False
        final_block_use_skip = True
    elif version == "small":
        init_block_channels = [8, 8]
        final_block_channels = [144, 1280]
        channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
        init_block_use_skip = True
        final_block_use_skip = True
    else:
        raise ValueError("Unsupported MnasNet version {}".format(version))

    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        # Bug fix: `init_block_channels` is a list of two counts, so it must be
        # scaled element-wise; the previous code multiplied the list object by a
        # float, raising TypeError for any width_scale != 1.0.
        init_block_channels = [round_channels(c * width_scale) for c in init_block_channels]
        # NOTE(review): final_block_channels is deliberately left unscaled,
        # matching the existing width_scale == 1.0 behavior — confirm against the
        # reference implementation before publishing scaled variants.

    net = MnasNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        se_factors=se_factors,
        init_block_use_skip=init_block_use_skip,
        final_block_use_skip=final_block_use_skip,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        # Build with a batch-1 dummy shape so weights exist before loading.
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def mnasnet_b1(**kwargs):
    """
    MnasNet-B1 from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Thin wrapper around ``get_mnasnet`` fixing version 'b1' at width multiplier 1.0.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs)
def mnasnet_a1(**kwargs):
    """
    MnasNet-A1 from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Thin wrapper around ``get_mnasnet`` fixing version 'a1' at width multiplier 1.0.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs)
def mnasnet_small(**kwargs):
    """
    MnasNet-Small from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Thin wrapper around ``get_mnasnet`` fixing version 'small' at width multiplier 1.0.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs)
def _test():
    """Smoke-test every MnasNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    expected_weight_counts = {
        mnasnet_b1: 4383312,
        mnasnet_a1: 3887038,
        mnasnet_small: 2030264,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert tuple(y.shape.as_list()) == (batch, 1000)

        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected


if __name__ == "__main__":
    _test()
| 15,818 | 33.997788 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/pyramidnet_cifar.py | """
PyramidNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn',
'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn',
'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn',
'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn',
'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, SimpleSequential, flatten, is_channels_first
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(tf.keras.Model):
    """
    PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARPyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # No activation on the stem conv: units are pre-activation style, and
        # PreResActivation below supplies the final BN+ReLU.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=None,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample with the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PyrUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        # 8x8 global average pooling assumes a 32x32 input (32 / 4 = 8).
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_pyramidnet_cifar(classes,
                         blocks,
                         alpha,
                         bottleneck,
                         model_name=None,
                         pretrained=False,
                         root=os.path.join("~", ".tensorflow", "models"),
                         **kwargs):
    """
    Create PyramidNet for CIFAR model with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes (10 or 100).
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert classes in (10, 100)

    if bottleneck:
        assert (blocks - 2) % 9 == 0
        layers = [(blocks - 2) // 9] * 3
    else:
        assert (blocks - 2) % 6 == 0
        layers = [(blocks - 2) // 6] * 3

    init_block_channels = 16
    growth_add = float(alpha) / float(sum(layers))

    # Linear widening: each unit adds `growth_add` channels on top of the last
    # unit of the previous stage (explicit-loop form of the original
    # functools.reduce expression, with identical float arithmetic).
    channels = []
    prev_last = float(init_block_channels)
    for layer_count in layers:
        stage = [(k + 1) * growth_add + prev_last for k in range(layer_count)]
        prev_last = stage[-1]
        channels.append(stage)
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARPyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a batch-1 dummy shape so weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def pyramidnet110_a48_cifar10(classes=10, **kwargs):
    """
    PyramidNet-110 (alpha=48) for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    Convenience wrapper: fixes depth, alpha and block type, forwarding the rest
    to ``get_pyramidnet_cifar``.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory for storing downloaded model parameters.
    """
    return get_pyramidnet_cifar(
        classes=classes,
        blocks=110,
        alpha=48,
        bottleneck=False,
        model_name="pyramidnet110_a48_cifar10",
        **kwargs)
def pyramidnet110_a48_cifar100(classes=100, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=48) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_cifar100", **kwargs)
def pyramidnet110_a48_svhn(classes=10, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=48) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_svhn", **kwargs)
def pyramidnet110_a84_cifar10(classes=10, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=84) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar10", **kwargs)
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=84) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar100", **kwargs)
def pyramidnet110_a84_svhn(classes=10, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=84) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_svhn", **kwargs)
def pyramidnet110_a270_cifar10(classes=10, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=270) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar10", **kwargs)
def pyramidnet110_a270_cifar100(classes=100, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=270) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar100", **kwargs)
def pyramidnet110_a270_svhn(classes=10, **kwargs):
    """
    PyramidNet-110 (widening factor alpha=270) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_svhn", **kwargs)
def pyramidnet164_a270_bn_cifar10(classes=10, **kwargs):
    """
    PyramidNet-164 (alpha=270, bottleneck units) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar10", **kwargs)
def pyramidnet164_a270_bn_cifar100(classes=100, **kwargs):
    """
    PyramidNet-164 (alpha=270, bottleneck units) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar100", **kwargs)
def pyramidnet164_a270_bn_svhn(classes=10, **kwargs):
    """
    PyramidNet-164 (alpha=270, bottleneck units) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_svhn", **kwargs)
def pyramidnet200_a240_bn_cifar10(classes=10, **kwargs):
    """
    PyramidNet-200 (alpha=240, bottleneck units) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar10", **kwargs)
def pyramidnet200_a240_bn_cifar100(classes=100, **kwargs):
    """
    PyramidNet-200 (alpha=240, bottleneck units) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar100", **kwargs)
def pyramidnet200_a240_bn_svhn(classes=10, **kwargs):
    """
    PyramidNet-200 (alpha=240, bottleneck units) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_svhn", **kwargs)
def pyramidnet236_a220_bn_cifar10(classes=10, **kwargs):
    """
    PyramidNet-236 (alpha=220, bottleneck units) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar10", **kwargs)
def pyramidnet236_a220_bn_cifar100(classes=100, **kwargs):
    """
    PyramidNet-236 (alpha=220, bottleneck units) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar100", **kwargs)
def pyramidnet236_a220_bn_svhn(classes=10, **kwargs):
    """
    PyramidNet-236 (alpha=220, bottleneck units) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_svhn", **kwargs)
def pyramidnet272_a200_bn_cifar10(classes=10, **kwargs):
    """
    PyramidNet-272 (alpha=200, bottleneck units) for CIFAR-10.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar10", **kwargs)
def pyramidnet272_a200_bn_cifar100(classes=100, **kwargs):
    """
    PyramidNet-272 (alpha=200, bottleneck units) for CIFAR-100.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar100", **kwargs)
def pyramidnet272_a200_bn_svhn(classes=10, **kwargs):
    """
    PyramidNet-272 (alpha=200, bottleneck units) for SVHN.
    Reference: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_svhn", **kwargs)
def _test():
    """Smoke test: build every PyramidNet variant, run a forward pass, and
    check both the output shape and the exact trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    # (constructor, number of classes, expected trainable-parameter count)
    cases = [
        (pyramidnet110_a48_cifar10, 10, 1772706),
        (pyramidnet110_a48_cifar100, 100, 1778556),
        (pyramidnet110_a48_svhn, 10, 1772706),
        (pyramidnet110_a84_cifar10, 10, 3904446),
        (pyramidnet110_a84_cifar100, 100, 3913536),
        (pyramidnet110_a84_svhn, 10, 3904446),
        (pyramidnet110_a270_cifar10, 10, 28485477),
        (pyramidnet110_a270_cifar100, 100, 28511307),
        (pyramidnet110_a270_svhn, 10, 28485477),
        (pyramidnet164_a270_bn_cifar10, 10, 27216021),
        (pyramidnet164_a270_bn_cifar100, 100, 27319071),
        (pyramidnet164_a270_bn_svhn, 10, 27216021),
        (pyramidnet200_a240_bn_cifar10, 10, 26752702),
        (pyramidnet200_a240_bn_cifar100, 100, 26844952),
        (pyramidnet200_a240_bn_svhn, 10, 26752702),
        (pyramidnet236_a220_bn_cifar10, 10, 26969046),
        (pyramidnet236_a220_bn_cifar100, 100, 27054096),
        (pyramidnet236_a220_bn_svhn, 10, 26969046),
        (pyramidnet272_a200_bn_cifar10, 10, 26210842),
        (pyramidnet272_a200_bn_cifar100, 100, 26288692),
        (pyramidnet272_a200_bn_svhn, 10, 26210842),
    ]
    batch = 14
    for model, classes, expected_count in cases:
        net = model(pretrained=pretrained, data_format=data_format)
        # NCHW vs NHWC input layout, matching data_format.
        shape = (batch, 3, 32, 32) if is_channels_first(data_format) else (batch, 32, 32, 3)
        y = net(tf.random.normal(shape))
        assert (tuple(y.shape.as_list()) == (batch, classes))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
if __name__ == "__main__":
    _test()
| 24,103 | 32.711888 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/preresnet_cifar.py | """
PreResNet for CIFAR/SVHN, implemented in TensorFlow.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
# Public API of this module: the model class plus one factory function per
# (depth, dataset) pretrained configuration.
__all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn',
           'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn',
           'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn',
           'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn',
           'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn',
           'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn',
           'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn',
           'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3, SimpleSequential, flatten, is_channels_first
from .preresnet import PreResUnit, PreResActivation
class CIFARPreResNet(tf.keras.Model):
    """
    PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # NOTE: layer construction order and the explicit `name=` arguments fix the
        # variable names used by load_weights(); do not reorder or rename.
        # Stem: a single 3x3 convolution (no pooling).
        self.features = SimpleSequential(name="features")
        self.features.add(conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        # One stage per entry of `channels`; one pre-activation residual unit per inner entry.
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample (stride 2) at the first unit of every stage after the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Pre-activation networks need a trailing BN+ReLU before pooling.
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Attribute is named "output1" because tf.keras.Model already defines `output`.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        """Run the backbone, flatten, and return class logits of shape (batch, classes)."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_preresnet_cifar(classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        root=os.path.join("~", ".tensorflow", "models"),
                        **kwargs):
    """
    Build a CIFAR/SVHN PreResNet ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027) with the requested depth.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Depth -> units per stage: (blocks - 2) layers split evenly over 3 stages,
    # with 9 layers per bottleneck unit group and 6 per simple unit group.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    layers = [(blocks - 2) // divisor] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expand the output width by a factor of 4.
        channels = [[4 * width for width in stage] for stage in channels]
    net = CIFARPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a dummy batch dimension so the weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def preresnet20_cifar10(classes=10, **kwargs):
    """
    PreResNet-20 for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar10",
        **kwargs)
def preresnet20_cifar100(classes=100, **kwargs):
    """
    PreResNet-20 for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar100",
        **kwargs)
def preresnet20_svhn(classes=10, **kwargs):
    """
    PreResNet-20 for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_svhn",
        **kwargs)
def preresnet56_cifar10(classes=10, **kwargs):
    """
    PreResNet-56 for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar10",
        **kwargs)
def preresnet56_cifar100(classes=100, **kwargs):
    """
    PreResNet-56 for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar100",
        **kwargs)
def preresnet56_svhn(classes=10, **kwargs):
    """
    PreResNet-56 for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_svhn",
        **kwargs)
def preresnet110_cifar10(classes=10, **kwargs):
    """
    PreResNet-110 for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar10",
        **kwargs)
def preresnet110_cifar100(classes=100, **kwargs):
    """
    PreResNet-110 for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar100",
        **kwargs)
def preresnet110_svhn(classes=10, **kwargs):
    """
    PreResNet-110 for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_svhn",
        **kwargs)
def preresnet164bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-164 (bottleneck units) for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar10",
        **kwargs)
def preresnet164bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-164 (bottleneck units) for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar100",
        **kwargs)
def preresnet164bn_svhn(classes=10, **kwargs):
    """
    PreResNet-164 (bottleneck units) for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_svhn",
        **kwargs)
def preresnet272bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-272 (bottleneck units) for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar10",
        **kwargs)
def preresnet272bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-272 (bottleneck units) for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar100",
        **kwargs)
def preresnet272bn_svhn(classes=10, **kwargs):
    """
    PreResNet-272 (bottleneck units) for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_svhn",
        **kwargs)
def preresnet542bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-542 (bottleneck units) for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_cifar10",
        **kwargs)
def preresnet542bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-542 (bottleneck units) for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_cifar100",
        **kwargs)
def preresnet542bn_svhn(classes=10, **kwargs):
    """
    PreResNet-542 (bottleneck units) for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_svhn",
        **kwargs)
def preresnet1001_cifar10(classes=10, **kwargs):
    """
    PreResNet-1001 for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_cifar10",
        **kwargs)
def preresnet1001_cifar100(classes=100, **kwargs):
    """
    PreResNet-1001 for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_cifar100",
        **kwargs)
def preresnet1001_svhn(classes=10, **kwargs):
    """
    PreResNet-1001 for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_svhn",
        **kwargs)
def preresnet1202_cifar10(classes=10, **kwargs):
    """
    PreResNet-1202 for CIFAR-10, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_cifar10",
        **kwargs)
def preresnet1202_cifar100(classes=100, **kwargs):
    """
    PreResNet-1202 for CIFAR-100, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_cifar100",
        **kwargs)
def preresnet1202_svhn(classes=10, **kwargs):
    """
    PreResNet-1202 for SVHN, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test every CIFAR/SVHN PreResNet variant: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # (constructor, number of classes, expected trainable-parameter count)
    cases = [
        (preresnet20_cifar10, 10, 272282),
        (preresnet20_cifar100, 100, 278132),
        (preresnet20_svhn, 10, 272282),
        (preresnet56_cifar10, 10, 855578),
        (preresnet56_cifar100, 100, 861428),
        (preresnet56_svhn, 10, 855578),
        (preresnet110_cifar10, 10, 1730522),
        (preresnet110_cifar100, 100, 1736372),
        (preresnet110_svhn, 10, 1730522),
        (preresnet164bn_cifar10, 10, 1703258),
        (preresnet164bn_cifar100, 100, 1726388),
        (preresnet164bn_svhn, 10, 1703258),
        (preresnet272bn_cifar10, 10, 2816090),
        (preresnet272bn_cifar100, 100, 2839220),
        (preresnet272bn_svhn, 10, 2816090),
        (preresnet542bn_cifar10, 10, 5598170),
        (preresnet542bn_cifar100, 100, 5621300),
        (preresnet542bn_svhn, 10, 5598170),
        (preresnet1001_cifar10, 10, 10327706),
        (preresnet1001_cifar100, 100, 10350836),
        (preresnet1001_svhn, 10, 10327706),
        (preresnet1202_cifar10, 10, 19423834),
        (preresnet1202_cifar100, 100, 19429684),
        (preresnet1202_svhn, 10, 19423834),
    ]

    for constructor, num_classes, expected_params in cases:
        net = constructor(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            in_shape = (batch, 3, 32, 32)
        else:
            in_shape = (batch, 32, 32, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, num_classes))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(constructor.__name__, weight_count))
        assert (weight_count == expected_params)


if __name__ == "__main__":
    _test()
| 24,758 | 36.11994 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/alphapose_coco.py | """
AlphaPose for COCO Keypoint, implemented in TensorFlow.
Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
"""
__all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco']
import os
import tensorflow as tf
from .common import conv3x3, PixelShuffle, DucBlock, HeatmapMaxDetBlock, SimpleSequential, is_channels_first
from .fastseresnet import fastseresnet101b
class AlphaPose(tf.keras.Model):
    """
    AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 data_format="channels_last",
                 **kwargs):
        super(AlphaPose, self).__init__(**kwargs)
        # Only RGB input is supported.
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.data_format = data_format
        self.backbone = backbone
        # Force a stable layer name so pretrained weights map correctly.
        self.backbone._name = "backbone"
        # Decoder: pixel-shuffle upsampling, then DUC blocks, then a 3x3 conv
        # producing one heatmap channel per keypoint.
        self.decoder = SimpleSequential(name="decoder")
        self.decoder.add(PixelShuffle(
            scale_factor=2,
            data_format=data_format,
            name="init_block"))
        # PixelShuffle with scale 2 trades 4x channels for 2x spatial size,
        # hence the division by 4 below.
        in_channels = backbone_out_channels // 4
        for i, out_channels in enumerate(channels):
            self.decoder.add(DucBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                scale_factor=2,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        self.decoder.add(conv3x3(
            in_channels=in_channels,
            out_channels=keypoints,
            use_bias=True,
            data_format=data_format,
            name="final_block"))
        # Converts heatmaps to keypoint coordinates/scores (used in eager mode only).
        self.heatmap_max_det = HeatmapMaxDetBlock(
            data_format=data_format,
            name="heatmap_max_det")
    def call(self, x, training=None):
        x = self.backbone(x, training=training)
        heatmap = self.decoder(x, training=training)
        # In graph (non-eager) mode the max-detection post-processing is skipped
        # and raw heatmaps are always returned.
        if self.return_heatmap or not tf.executing_eagerly():
            return heatmap
        else:
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_alphapose(backbone,
                  backbone_out_channels,
                  keypoints,
                  model_name=None,
                  data_format="channels_last",
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Assemble an AlphaPose network and, if requested, load its pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    decoder_channels = [256, 128]
    net = AlphaPose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        keypoints=keypoints,
        data_format=data_format,
        **kwargs)

    if pretrained:
        # `None` and "" are both rejected here.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        # The model must be built (variables created) before weights can be restored.
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, data_format="channels_last", **kwargs):
    """
    AlphaPose for COCO Keypoint built on a Fast-SE-ResNet-101b backbone, from
    'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = fastseresnet101b(pretrained=pretrained_backbone, data_format=data_format).features
    # Drop the last child of the feature extractor (presumably the final pooling
    # stage — confirm against fastseresnet101b) so convolutional maps are exposed.
    del backbone.children[-1]
    return get_alphapose(
        backbone=backbone,
        backbone_out_channels=2048,
        keypoints=keypoints,
        model_name="alphapose_fastseresnet101b_coco",
        data_format=data_format,
        **kwargs)
def _test():
    """Smoke-test the AlphaPose model: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False

    for model in [alphapose_fastseresnet101b_coco]:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
        y = net(x)
        assert (y.shape[0] == batch)
        if return_heatmap:
            # Heatmaps are at 1/4 of the input resolution.
            if is_channels_first(data_format):
                assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and
                        (y.shape[3] == x.shape[3] // 4))
            else:
                assert ((y.shape[3] == keypoints) and (y.shape[1] == x.shape[1] // 4) and
                        (y.shape[2] == x.shape[2] // 4))
        else:
            # Decoded output: one (x, y, score) triple per keypoint.
            assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != alphapose_fastseresnet101b_coco or weight_count == 59569873)


if __name__ == "__main__":
    _test()
| 7,571 | 34.886256 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/pyramidnet.py | """
PyramidNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, BatchNorm, MaxPool2d, AvgPool2d, pre_conv1x1_block, pre_conv3x3_block, SimpleSequential,\
flatten, is_channels_first
from .preresnet import PreResActivation
class PyrBlock(nn.Layer):
    """
    Simple PyramidNet block for residual path in PyramidNet unit.
    Two pre-activation 3x3 convolutions; any stride is applied by the first one.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(PyrBlock, self).__init__(**kwargs)
        # First conv carries the stride; `activate=False` drops the
        # pre-activation nonlinearity on the unit's input.
        self.conv1 = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activate=False,
            data_format=data_format,
            name="conv1")
        self.conv2 = pre_conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class PyrBottleneck(nn.Layer):
    """
    PyramidNet bottleneck block for residual path in PyramidNet unit.
    1x1 reduce -> 3x3 (strided) -> 1x1 expand, all pre-activation style.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 data_format="channels_last",
                 **kwargs):
        super(PyrBottleneck, self).__init__(**kwargs)
        # Internal width is a quarter of the output width (bottleneck factor 4).
        mid_channels = out_channels // 4
        # `activate=False` drops the pre-activation nonlinearity on the unit's input.
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            activate=False,
            data_format=data_format,
            name="conv1")
        # The 3x3 conv carries the spatial stride.
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            data_format=data_format,
            name="conv2")
        self.conv3 = pre_conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class PyrUnit(nn.Layer):
    """
    PyramidNet unit with residual connection.
    Since PyramidNet widens channels at every unit, the identity branch is
    zero-padded along the channel axis (and average-pooled when strided)
    instead of using a projection convolution.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 data_format="channels_last",
                 **kwargs):
        super(PyrUnit, self).__init__(**kwargs)
        # Channels never shrink in PyramidNet.
        assert (out_channels >= in_channels)
        self.data_format = data_format
        self.resize_identity = (strides != 1)
        # How many zero channels to append to the identity branch.
        self.identity_pad_width = out_channels - in_channels
        if bottleneck:
            self.body = PyrBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        else:
            self.body = PyrBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        # Post-body batch norm (no activation before the residual add).
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        if self.resize_identity:
            # Downsample the identity spatially to match the strided body.
            self.identity_pool = AvgPool2d(
                pool_size=2,
                strides=strides,
                ceil_mode=True,
                data_format=data_format,
                name="identity_pool")
    def call(self, x, training=None):
        identity = x
        x = self.body(x, training=training)
        x = self.bn(x, training=training)
        if self.resize_identity:
            identity = self.identity_pool(identity)
        if self.identity_pad_width > 0:
            # Zero-pad the extra output channels of the identity branch.
            if is_channels_first(self.data_format):
                paddings = [[0, 0], [0, self.identity_pad_width], [0, 0], [0, 0]]
            else:
                paddings = [[0, 0], [0, 0], [0, 0], [0, self.identity_pad_width]]
            identity = tf.pad(identity, paddings=paddings)
        x = x + identity
        return x
class PyrInitBlock(nn.Layer):
    """
    PyramidNet specific initial block: 7x7/2 convolution, batch norm, ReLU and
    3x3/2 max pooling (overall 4x spatial reduction).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(PyrInitBlock, self).__init__(**kwargs)
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            strides=2,
            padding=3,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
    def call(self, x, training=None):
        x = self.conv(x)
        x = self.bn(x, training=training)
        x = self.activ(x)
        x = self.pool(x)
        return x
class PyramidNet(tf.keras.Model):
    """
    PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(PyrInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PyrUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Final BN+ReLU required by the pre-activation (PreResNet-style) design.
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): `input_dim` is a legacy Dense kwarg here — the layer
        # infers its input size at build time regardless.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_pyramidnet(blocks,
                   alpha,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create PyramidNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value: the total additive channel widening spread
        linearly over all units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `blocks` is not a supported depth, or `pretrained` is True while
        `model_name` is empty.
    """
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14:
        layers = [2, 2, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        # Fixed: the message previously said "ResNet" (copy-paste from the
        # ResNet builder); this function builds PyramidNet.
        raise ValueError("Unsupported PyramidNet with number of blocks: {}".format(blocks))
    init_block_channels = 64
    # Per-unit additive growth so that the total widening across all units is `alpha`.
    growth_add = float(alpha) / float(sum(layers))
    from functools import reduce
    # Running prefix: start from [[64]] and append one widening list per stage,
    # then drop the seed.
    channels = reduce(
        lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
        layers,
        [[init_block_channels]])[1:]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    if blocks < 50:
        bottleneck = False
    else:
        bottleneck = True
        # Bottleneck units expose 4x their internal width on the output.
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = PyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def pyramidnet101_a360(**kwargs):
    """
    Build the 101-layer PyramidNet with alpha=360, from 'Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet(model_name="pyramidnet101_a360", blocks=101, alpha=360, **kwargs)
def _test():
    """Smoke-test the PyramidNet model: output shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    pretrained = False

    for model in [pyramidnet101_a360]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            in_shape = (batch, 3, 224, 224)
        else:
            in_shape = (batch, 224, 224, 3)
        y = net(tf.random.normal(in_shape))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != pyramidnet101_a360 or weight_count == 42455070)


if __name__ == "__main__":
    _test()
| 13,503 | 30.699531 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/seresnet.py | """
SE-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, SEBlock, SimpleSequential, flatten
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class SEResUnit(nn.Layer):
    """
    SE-ResNet unit: a ResNet residual block followed by squeeze-and-excitation
    channel recalibration before the residual addition.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 conv1_stride,
                 data_format="channels_last",
                 **kwargs):
        super(SEResUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever the residual branch changes
        # channel count or spatial size.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        # Squeeze-and-excitation gate applied to the residual branch output.
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            # Projection shortcut (no activation).
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEResNet(tf.keras.Model):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SEResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # NOTE(review): `input_dim` is a legacy Dense kwarg here — the layer
        # infers its input size at build time regardless.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_seresnet(blocks,
                 bottleneck=None,
                 conv1_stride=True,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depths whose stage layout does not depend on the bottleneck flag.
    plain_layouts = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in plain_layouts:
        layers = plain_layouts[blocks]
    else:
        raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))

    # Sanity check: each unit contributes 3 convs (bottleneck) or 2 (plain),
    # plus 2 for the stem/classifier.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        # `None` and "" are both rejected here.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def seresnet10(**kwargs):
    """
    Build the 10-layer SE-ResNet (experimental) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet10", blocks=10, **kwargs)
def seresnet12(**kwargs):
    """
    Build the 12-layer SE-ResNet (experimental) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet12", blocks=12, **kwargs)
def seresnet14(**kwargs):
    """
    Build the 14-layer SE-ResNet (experimental) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet14", blocks=14, **kwargs)
def seresnet16(**kwargs):
    """
    Build the 16-layer SE-ResNet (experimental) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet16", blocks=16, **kwargs)
def seresnet18(**kwargs):
    """
    Build the 18-layer SE-ResNet from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet18", blocks=18, **kwargs)
def seresnet26(**kwargs):
    """
    Build the 26-layer SE-ResNet (experimental, plain blocks) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet26", blocks=26, bottleneck=False, **kwargs)
def seresnetbc26b(**kwargs):
    """
    Build the SE-ResNet-BC-26b (experimental, bottleneck compressed) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnetbc26b", blocks=26, bottleneck=True,
                        conv1_stride=False, **kwargs)
def seresnet34(**kwargs):
    """
    Build the 34-layer SE-ResNet from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet34", blocks=34, **kwargs)
def seresnetbc38b(**kwargs):
    """
    Build the SE-ResNet-BC-38b (experimental, bottleneck compressed) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnetbc38b", blocks=38, bottleneck=True,
                        conv1_stride=False, **kwargs)
def seresnet50(**kwargs):
    """
    Build the 50-layer SE-ResNet from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet50", blocks=50, **kwargs)
def seresnet50b(**kwargs):
    """
    Build the 50-layer SE-ResNet variant that strides the second convolution of
    each bottleneck block. From 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet50b", blocks=50, conv1_stride=False, **kwargs)
def seresnet101(**kwargs):
    """
    Build the 101-layer SE-ResNet from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet101", blocks=101, **kwargs)
def seresnet101b(**kwargs):
    """
    Build the 101-layer SE-ResNet variant that strides the second convolution of
    each bottleneck block. From 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet101b", blocks=101, conv1_stride=False, **kwargs)
def seresnet152(**kwargs):
    """
    Build the 152-layer SE-ResNet from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(model_name="seresnet152", blocks=152, **kwargs)
def seresnet152b(**kwargs):
    """
    Construct the SE-ResNet-152 variant whose bottleneck blocks place the stride on the second
    convolution, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet152b", blocks=152, conv1_stride=False, **kwargs)
    return net
def seresnet200(**kwargs):
    """
    Construct the (experimental) SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet200", blocks=200, **kwargs)
    return net
def seresnet200b(**kwargs):
    """
    Construct the (experimental) SE-ResNet-200 variant whose bottleneck blocks place the stride
    on the second convolution, from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet200b", blocks=200, conv1_stride=False, **kwargs)
    return net
def _test():
    """
    Smoke-test every SE-ResNet variant: build the network, run a forward pass, check the
    output shape, and verify the trainable-parameter count against known-good values.
    """
    import numpy as np
    # Import TensorFlow locally so the test is self-contained, matching the sibling
    # `_test` functions in the other model modules (e.g. seresnet_cub).
    import tensorflow as tf
    import tensorflow.keras.backend as K

    pretrained = False

    models = [
        seresnet10,
        seresnet12,
        seresnet14,
        seresnet16,
        seresnet18,
        seresnet26,
        seresnetbc26b,
        seresnet34,
        seresnetbc38b,
        seresnet50,
        seresnet50b,
        seresnet101,
        seresnet101b,
        seresnet152,
        seresnet152b,
        seresnet200,
        seresnet200b,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        # Forward pass on a random NHWC batch; every variant classifies into 1000 classes.
        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        # Parameter count acts as a structural checksum for each architecture.
        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != seresnet10 or weight_count == 5463332)
        assert (model != seresnet12 or weight_count == 5537896)
        assert (model != seresnet14 or weight_count == 5835504)
        assert (model != seresnet16 or weight_count == 7024640)
        assert (model != seresnet18 or weight_count == 11778592)
        assert (model != seresnet26 or weight_count == 18093852)
        assert (model != seresnetbc26b or weight_count == 17395976)
        assert (model != seresnet34 or weight_count == 21958868)
        assert (model != seresnetbc38b or weight_count == 24026616)
        assert (model != seresnet50 or weight_count == 28088024)
        assert (model != seresnet50b or weight_count == 28088024)
        assert (model != seresnet101 or weight_count == 49326872)
        assert (model != seresnet101b or weight_count == 49326872)
        assert (model != seresnet152 or weight_count == 66821848)
        assert (model != seresnet152b or weight_count == 66821848)
        assert (model != seresnet200 or weight_count == 71835864)
        assert (model != seresnet200b or weight_count == 71835864)
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test()
| 19,070 | 32.694346 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/seresnet_cub.py | """
SE-ResNet for CUB-200-2011, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub',
'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub',
'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub',
'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub']
from .common import is_channels_first
from .seresnet import get_seresnet
def seresnet10_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-10 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet10_cub", blocks=10, classes=classes, **kwargs)
    return net
def seresnet12_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-12 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet12_cub", blocks=12, classes=classes, **kwargs)
    return net
def seresnet14_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-14 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet14_cub", blocks=14, classes=classes, **kwargs)
    return net
def seresnetbc14b_cub(classes=200, **kwargs):
    """
    Construct the (experimental, bottleneck-compressed) SE-ResNet-BC-14b model for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnetbc14b_cub", blocks=14, bottleneck=True,
                       conv1_stride=False, classes=classes, **kwargs)
    return net
def seresnet16_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-16 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet16_cub", blocks=16, classes=classes, **kwargs)
    return net
def seresnet18_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-18 model for CUB-200-2011, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet18_cub", blocks=18, classes=classes, **kwargs)
    return net
def seresnet26_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-26 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet26_cub", blocks=26, bottleneck=False,
                       classes=classes, **kwargs)
    return net
def seresnetbc26b_cub(classes=200, **kwargs):
    """
    Construct the (experimental, bottleneck-compressed) SE-ResNet-BC-26b model for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnetbc26b_cub", blocks=26, bottleneck=True,
                       conv1_stride=False, classes=classes, **kwargs)
    return net
def seresnet34_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-34 model for CUB-200-2011, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet34_cub", blocks=34, classes=classes, **kwargs)
    return net
def seresnetbc38b_cub(classes=200, **kwargs):
    """
    Construct the (experimental, bottleneck-compressed) SE-ResNet-BC-38b model for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnetbc38b_cub", blocks=38, bottleneck=True,
                       conv1_stride=False, classes=classes, **kwargs)
    return net
def seresnet50_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-50 model for CUB-200-2011, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet50_cub", blocks=50, classes=classes, **kwargs)
    return net
def seresnet50b_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-50 variant (stride on the second bottleneck convolution) for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet50b_cub", blocks=50, conv1_stride=False,
                       classes=classes, **kwargs)
    return net
def seresnet101_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-101 model for CUB-200-2011, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet101_cub", blocks=101, classes=classes, **kwargs)
    return net
def seresnet101b_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-101 variant (stride on the second bottleneck convolution) for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet101b_cub", blocks=101, conv1_stride=False,
                       classes=classes, **kwargs)
    return net
def seresnet152_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-152 model for CUB-200-2011, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet152_cub", blocks=152, classes=classes, **kwargs)
    return net
def seresnet152b_cub(classes=200, **kwargs):
    """
    Construct the SE-ResNet-152 variant (stride on the second bottleneck convolution) for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet152b_cub", blocks=152, conv1_stride=False,
                       classes=classes, **kwargs)
    return net
def seresnet200_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-200 model for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet200_cub", blocks=200, classes=classes, **kwargs)
    return net
def seresnet200b_cub(classes=200, **kwargs):
    """
    Construct the (experimental) SE-ResNet-200 variant (stride on the second bottleneck
    convolution) for CUB-200-2011, from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnet(model_name="seresnet200b_cub", blocks=200, conv1_stride=False,
                       classes=classes, **kwargs)
    return net
def _test():
    """
    Smoke-test every CUB-200-2011 SE-ResNet variant: build it, run a forward pass, check the
    output shape, and compare the trainable-parameter count against a known-good table.
    """
    import numpy as np
    import tensorflow as tf
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Known-good trainable-parameter counts, keyed by model builder.
    expected_weight_counts = {
        seresnet10_cub: 5052932,
        seresnet12_cub: 5127496,
        seresnet14_cub: 5425104,
        seresnetbc14b_cub: 9126136,
        seresnet16_cub: 6614240,
        seresnet18_cub: 11368192,
        seresnet26_cub: 17683452,
        seresnetbc26b_cub: 15756776,
        seresnet34_cub: 21548468,
        seresnetbc38b_cub: 22387416,
        seresnet50_cub: 26448824,
        seresnet50b_cub: 26448824,
        seresnet101_cub: 47687672,
        seresnet101b_cub: 47687672,
        seresnet152_cub: 65182648,
        seresnet152b_cub: 65182648,
        seresnet200_cub: 70196664,
        seresnet200b_cub: 70196664,
    }
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        x = tf.random.normal(shape)
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 200))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test()
| 14,111 | 35.942408 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/densenet.py | """
DenseNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import pre_conv1x1_block, pre_conv3x3_block, AvgPool2d, SimpleSequential, get_channel_axis, flatten
from .preresnet import PreResInitBlock, PreResActivation
class DenseUnit(nn.Layer):
    """
    DenseNet unit: a pre-activated 1x1 bottleneck followed by a 3x3 convolution, whose output
    is concatenated with the unit input along the channel axis (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (input channels plus the growth increment).
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop (0.0 disables dropout).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(DenseUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4  # bottleneck width multiplier: mid channels = 4 * growth increment
        inc_channels = out_channels - in_channels  # growth: channels added by this unit
        mid_channels = inc_channels * bn_size

        # Pre-activation (BN -> ReLU -> conv) bottleneck, then the 3x3 feature convolution.
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=inc_channels,
            data_format=data_format,
            name="conv2")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")

    def call(self, x, training=None):
        """Compute new features and concatenate them onto the input along channels."""
        identity = x
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        # Dense connectivity: the unit output carries all input channels plus the new ones.
        x = tf.concat([identity, x], axis=get_channel_axis(self.data_format))
        return x
class TransitionBlock(nn.Layer):
    """
    DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit,
    triggered only in the first unit of each stage. Compresses channels with a pre-activated
    1x1 convolution and halves the spatial resolution with 2x2 average pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(TransitionBlock, self).__init__(**kwargs)
        self.conv = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        # 2x2/2 average pooling halves both spatial dimensions.
        self.pool = AvgPool2d(
            pool_size=2,
            strides=2,
            padding=0)

    def call(self, x, training=None):
        """Apply channel compression followed by spatial downsampling."""
        x = self.conv(x, training=training)
        x = self.pool(x)
        return x
class DenseNet(tf.keras.Model):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit (cumulative widths, per stage).
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # Backbone: stem, then one stage per channel sub-list, then post-activation + pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            # Every stage except the first opens with a transition block that halves
            # both the channel count and the spatial resolution.
            if i != 0:
                stage.add(TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2),
                    data_format=data_format,
                    name="trans{}".format(i + 1)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(DenseUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Final BN + ReLU (pre-activation networks defer it past the last unit).
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classifier head. NOTE(review): attribute is named 'output1', presumably to avoid
        # clashing with the reserved `tf.keras.Model.output` property — confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Run the backbone, flatten the pooled features, and apply the classifier."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_densenet(blocks,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 121, 161, 169, 201).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `blocks` is not a supported DenseNet depth, or if `pretrained` is True
        without a valid `model_name`.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))

    from functools import reduce
    # Build the per-stage cumulative channel table. The inner reduce produces one stage:
    # starting from half the previous stage's final width (the transition block), each unit
    # adds `growth_rate` channels. The outer reduce chains stages; the seed
    # [[init_block_channels * 2]] and the trailing [1:] slices drop the bookkeeping entries.
    channels = reduce(lambda xi, yi:
                      xi + [reduce(lambda xj, yj:
                                   xj + [xj[-1] + yj],
                                   [growth_rate] * yi,
                                   [xi[-1][-1] // 2])[1:]],
                      layers,
                      [[init_block_channels * 2]])[1:]

    net = DenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Keras requires the model to be built (shapes known) before weights can be loaded.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def densenet121(**kwargs):
    """
    Construct the DenseNet-121 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_densenet(model_name="densenet121", blocks=121, **kwargs)
    return net
def densenet161(**kwargs):
    """
    Construct the DenseNet-161 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_densenet(model_name="densenet161", blocks=161, **kwargs)
    return net
def densenet169(**kwargs):
    """
    Construct the DenseNet-169 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_densenet(model_name="densenet169", blocks=169, **kwargs)
    return net
def densenet201(**kwargs):
    """
    Construct the DenseNet-201 model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_densenet(model_name="densenet201", blocks=201, **kwargs)
    return net
def _test():
    """
    Smoke-test every DenseNet variant: build it, run a forward pass, check the output shape,
    and compare the trainable-parameter count against a known-good table.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Known-good trainable-parameter counts, keyed by model builder.
    expected_weight_counts = {
        densenet121: 7978856,
        densenet161: 28681000,
        densenet169: 14149480,
        densenet201: 20013928,
    }
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test()
| 11,289 | 32.011696 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/seresnext.py | """
SE-ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, SEBlock, SimpleSequential, flatten
from .resnet import ResInitBlock
from .resnext import ResNeXtBottleneck
class SEResNeXtUnit(nn.Layer):
    """
    SE-ResNeXt unit: a grouped (ResNeXt) bottleneck whose output is recalibrated by a
    squeeze-and-excitation block before the residual addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNeXtUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the residual path changes shape.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            data_format=data_format,
            name="body")
        self.se = SEBlock(
            channels=out_channels,
            data_format=data_format,
            name="se")
        if self.resize_identity:
            # 1x1 projection (no activation) to match the residual branch's shape.
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        """Residual forward pass: bottleneck -> SE recalibration -> add identity -> ReLU."""
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEResNeXt(tf.keras.Model):
    """
    SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # Backbone: ResNet-style stem, then one stage per channel sub-list, then pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SEResNeXtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classifier head. NOTE(review): attribute is named 'output1', presumably to avoid
        # clashing with the reserved `tf.keras.Model.output` property — confirm.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        """Run the backbone, flatten the pooled features, and apply the classifier."""
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_seresnext(blocks,
                  cardinality,
                  bottleneck_width,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create SE-ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `blocks` is not a supported depth, or if `pretrained` is True without a
        valid `model_name`.
    """
    if blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    else:
        raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]

    # Expand the per-stage widths into one entry per unit.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SEResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Keras requires the model to be built (shapes known) before weights can be loaded.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def seresnext50_32x4d(**kwargs):
    """
    Construct the SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnext(model_name="seresnext50_32x4d", blocks=50, cardinality=32,
                        bottleneck_width=4, **kwargs)
    return net
def seresnext101_32x4d(**kwargs):
    """
    Construct the SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnext(model_name="seresnext101_32x4d", blocks=101, cardinality=32,
                        bottleneck_width=4, **kwargs)
    return net
def seresnext101_64x4d(**kwargs):
    """
    Construct the SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_seresnext(model_name="seresnext101_64x4d", blocks=101, cardinality=64,
                        bottleneck_width=4, **kwargs)
    return net
def _test():
    """
    Smoke-test every SE-ResNeXt variant: build it, run a forward pass, check the output
    shape, and compare the trainable-parameter count against a known-good table.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Known-good trainable-parameter counts, keyed by model builder.
    expected_weight_counts = {
        seresnext50_32x4d: 27559896,
        seresnext101_32x4d: 48955416,
        seresnext101_64x4d: 88232984,
    }
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test()
| 9,503 | 31.772414 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/drn.py | """
DRN for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
"""
__all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, BatchNorm, SimpleSequential, flatten, is_channels_first
class DRNConv(nn.Layer):
    """
    DRN specific convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation,
                 activate,
                 data_format="channels_last",
                 **kwargs):
        super(DRNConv, self).__init__(**kwargs)
        self.activate = activate
        # Convolution is bias-free because it is always followed by BatchNorm.
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            use_bias=False,
            data_format=data_format,
            name="conv")
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
        if self.activate:
            self.activ = nn.ReLU()
    def call(self, x, training=None):
        # Pipeline: conv -> batch norm -> optional ReLU.
        x = self.conv(x)
        x = self.bn(x, training=training)
        if self.activate:
            x = self.activ(x)
        return x
def drn_conv1x1(in_channels, out_channels, strides, activate, data_format="channels_last", **kwargs):
    """
    1x1 (pointwise) version of the DRN convolution block: no padding,
    no dilation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DRNConv(
        kernel_size=1,
        padding=0,
        dilation=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        activate=activate,
        data_format=data_format,
        **kwargs)
def drn_conv3x3(in_channels, out_channels, strides, dilation, activate, data_format="channels_last", **kwargs):
    """
    3x3 version of the DRN convolution block. Padding equals the dilation so
    the spatial size is preserved for stride 1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DRNConv(
        kernel_size=3,
        padding=dilation,
        dilation=dilation,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        activate=activate,
        data_format=data_format,
        **kwargs)
class DRNBlock(nn.Layer):
    """
    Simple DRN block for residual path in DRN unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # NOTE(review): the __init__ parameter is named `stride` (singular) while
    # sibling blocks use `strides`; DRNUnit passes it by keyword, so renaming
    # would break that caller.
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilation,
                 data_format="channels_last",
                 **kwargs):
        super(DRNBlock, self).__init__(**kwargs)
        # First 3x3 conv may downsample and is ReLU-activated ...
        self.conv1 = drn_conv3x3(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=stride,
            dilation=dilation,
            activate=True,
            data_format=data_format,
            name="conv1")
        # ... second conv keeps resolution; final activation happens in
        # DRNUnit after the residual addition.
        self.conv2 = drn_conv3x3(
            in_channels=out_channels,
            out_channels=out_channels,
            strides=1,
            dilation=dilation,
            activate=False,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class DRNBottleneck(nn.Layer):
    """
    DRN bottleneck block for residual path in DRN unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 data_format="channels_last",
                 **kwargs):
        super(DRNBottleneck, self).__init__(**kwargs)
        # Classic bottleneck: squeeze to out_channels/4, 3x3 (possibly
        # strided/dilated), then expand back; last conv is unactivated so
        # DRNUnit can apply ReLU after the residual addition.
        mid_channels = out_channels // 4
        self.conv1 = drn_conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=1,
            activate=True,
            data_format=data_format,
            name="conv1")
        self.conv2 = drn_conv3x3(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            dilation=dilation,
            activate=True,
            data_format=data_format,
            name="conv2")
        self.conv3 = drn_conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            strides=1,
            activate=False,
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class DRNUnit(nn.Layer):
    """
    DRN unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    simplified : bool
        Whether to use a simple or simplified block in units.
    residual : bool
        Whether do residual calculations.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bottleneck,
                 simplified,
                 residual,
                 data_format="channels_last",
                 **kwargs):
        super(DRNUnit, self).__init__(**kwargs)
        # Valid combinations: bottlenecks must be residual; a unit cannot be
        # both bottleneck and simplified, nor both residual and simplified.
        assert residual or (not bottleneck)
        assert (not (bottleneck and simplified))
        assert (not (residual and simplified))
        self.residual = residual
        # A 1x1 projection shortcut is needed when channel count or spatial
        # size changes on a residual (non-simplified) unit.
        self.resize_identity = ((in_channels != out_channels) or (strides != 1)) and self.residual and (not simplified)
        if bottleneck:
            self.body = DRNBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                data_format=data_format,
                name="body")
        elif simplified:
            # Simplified unit: a single unactivated 3x3 conv, no residual.
            self.body = drn_conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                activate=False,
                data_format=data_format,
                name="body")
        else:
            self.body = DRNBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=strides,
                dilation=dilation,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_conv = drn_conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activate=False,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()
    def call(self, x, training=None):
        if self.resize_identity:
            identity = self.identity_conv(x, training=training)
        else:
            identity = x
        x = self.body(x, training=training)
        if self.residual:
            x = x + identity
        # ReLU is applied after the (optional) residual addition.
        x = self.activ(x)
        return x
def drn_init_block(in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    DRN specific initial block: a stride-1 7x7 convolution (padding 3) with
    batch norm and ReLU, which keeps the input resolution unchanged.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return DRNConv(
        kernel_size=7,
        padding=3,
        strides=1,
        dilation=1,
        activate=True,
        in_channels=in_channels,
        out_channels=out_channels,
        data_format=data_format,
        **kwargs)
class DRN(tf.keras.Model):
    """
    DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilation values for 3x3 convolution layers for each unit.
    bottlenecks : list of list of int
        Whether to use a bottleneck or simple block in each unit.
    simplifieds : list of list of int
        Whether to use a simple or simplified block in each unit.
    residuals : list of list of int
        Whether to use residual block in each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bottlenecks,
                 simplifieds,
                 residuals,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(drn_init_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample with the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(DRNUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dilation=dilations[i][j],
                    bottleneck=(bottlenecks[i][j] == 1),
                    simplified=(simplifieds[i][j] == 1),
                    residual=(residuals[i][j] == 1),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Pool size 28 assumes the default 224x224 input (stride-1 init block
        # plus three stride-2 stages -> 28x28 feature map).
        self.features.add(nn.AveragePooling2D(
            pool_size=28,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        # Classifier is a 1x1 convolution; spatial dims are flattened in call().
        self.output1 = Conv2d(
            in_channels=in_channels,
            out_channels=classes,
            kernel_size=1,
            data_format=data_format,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = self.output1(x)
        x = flatten(x, self.data_format)
        return x
def get_drn(blocks,
            simplified=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create DRN-C or DRN-D model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    simplified : bool, default False
        Whether to use simplified scheme (D architecture).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Depths 22/38/54/105 exist only in the simplified (D) scheme.
    if blocks == 22:
        assert simplified
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 26:
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 38:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 42:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 54:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 58:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 105:
        assert simplified
        layers = [1, 1, 3, 4, 23, 3, 1, 1]
    else:
        raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
    # Shallow nets use simple blocks everywhere; deeper nets use bottlenecks
    # in the four middle layer groups.
    if blocks < 50:
        channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
        bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
    else:
        channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
        bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    if simplified:
        simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
        residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    else:
        simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
        residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
    dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]
    def expand(property_per_layers):
        from functools import reduce
        # Regroup per-layer values into per-stage lists: each
        # (value, count, downsample) triple either starts a new stage
        # (downsample != 0) or is merged into the previous stage
        # (downsample == 0), starting from a single empty stage.
        return reduce(
            lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
            zip(property_per_layers, layers, downsample),
            [[]])
    channels = expand(channels_per_layers)
    dilations = expand(dilations_per_layers)
    bottlenecks = expand(bottlenecks_per_layers)
    residuals = expand(residuals_per_layers)
    simplifieds = expand(simplifieds_per_layers)
    init_block_channels = channels_per_layers[0]
    net = DRN(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bottlenecks=bottlenecks,
        simplifieds=simplifieds,
        residuals=residuals,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build with a dummy input shape so weights exist before loading.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def drnc26(**kwargs):
    """
    DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 26, "model_name": "drnc26"}
    return get_drn(**config, **kwargs)
def drnc42(**kwargs):
    """
    DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 42, "model_name": "drnc42"}
    return get_drn(**config, **kwargs)
def drnc58(**kwargs):
    """
    DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 58, "model_name": "drnc58"}
    return get_drn(**config, **kwargs)
def drnd22(**kwargs):
    """
    DRN-D-22 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs)
def drnd38(**kwargs):
    """
    DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 38, "simplified": True, "model_name": "drnd38"}
    return get_drn(**config, **kwargs)
def drnd54(**kwargs):
    """
    DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 54, "simplified": True, "model_name": "drnd54"}
    return get_drn(**config, **kwargs)
def drnd105(**kwargs):
    """
    DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Keyword arguments are forwarded to `get_drn`; the common ones are
    `pretrained` (bool, default False) and `root` (str, default
    '~/.tensorflow/models', directory for the model parameters).
    """
    config = {"blocks": 105, "simplified": True, "model_name": "drnd105"}
    return get_drn(**config, **kwargs)
def _test():
    """Smoke-test the DRN variants: output shape and parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    expected_counts = {
        drnc26: 21126584,
        drnc42: 31234744,
        drnc58: 40542008,  # 41591608
        drnd22: 16393752,
        drnd38: 26501912,
        drnd54: 35809176,
        drnd105: 54801304,
    }
    batch = 14
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        shape = (batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3)
        y = net(tf.random.normal(shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
if __name__ == "__main__":
    _test()
| 21,693 | 30.44058 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mixnet.py | """
MixNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
"""
__all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, get_activation_layer, Conv2d, BatchNorm, conv1x1_block,\
conv3x3_block, dwconv3x3_block, SEBlock, SimpleSequential, flatten, is_channels_first, get_channel_axis
class MixConv(nn.Layer):
    """
    Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    axis : int, default 1
        The axis on which to concatenate the outputs.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 axis=1,
                 data_format="channels_last",
                 **kwargs):
        super(MixConv, self).__init__(**kwargs)
        # Scalars are promoted to single-element lists: one conv per kernel size.
        kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        padding = padding if isinstance(padding, list) else [padding]
        kernel_count = len(kernel_size)
        self.splitted_in_channels = self.split_channels(in_channels, kernel_count)
        splitted_out_channels = self.split_channels(out_channels, kernel_count)
        self.axis = axis
        self.convs = []
        for i, kernel_size_i in enumerate(kernel_size):
            in_channels_i = self.splitted_in_channels[i]
            out_channels_i = splitted_out_channels[i]
            padding_i = padding[i]
            self.convs.append(
                Conv2d(
                    in_channels=in_channels_i,
                    out_channels=out_channels_i,
                    kernel_size=kernel_size_i,
                    strides=strides,
                    padding=padding_i,
                    dilation=dilation,
                    # Fully-grouped (depthwise) layers stay depthwise per split.
                    groups=(out_channels_i if out_channels == groups else groups),
                    use_bias=use_bias,
                    data_format=data_format,
                    name="conv{}".format(i + 1)))
    def call(self, x, training=None):
        # Split channels, run each split through its own conv, re-concatenate.
        xx = tf.split(x, num_or_size_splits=self.splitted_in_channels, axis=self.axis)
        out = [conv_i(x_i, training=training) for x_i, conv_i in zip(xx, self.convs)]
        x = tf.concat(out, axis=self.axis)
        return x
    @staticmethod
    def split_channels(channels, kernel_count):
        # Even split; any remainder is assigned to the first group.
        splitted_channels = [channels // kernel_count] * kernel_count
        splitted_channels[0] += channels - sum(splitted_channels)
        return splitted_channels
class MixConvBlock(nn.Layer):
    """
    Mixed convolution block with Batch normalization and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation("relu")
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU()),
                 data_format="channels_last",
                 **kwargs):
        super(MixConvBlock, self).__init__(**kwargs)
        # None activation / use_bn=False turn the respective stage off.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = MixConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            axis=get_channel_axis(data_format),
            data_format=data_format,
            name="conv")
        if self.use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation)
    def call(self, x, training=None):
        # Pipeline: mixed conv -> optional batch norm -> optional activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x, training=training)
        if self.activate:
            x = self.activ(x)
        return x
def mixconv1x1_block(in_channels,
                     out_channels,
                     kernel_count,
                     strides=1,
                     groups=1,
                     use_bias=False,
                     use_bn=True,
                     bn_eps=1e-5,
                     activation=(lambda: nn.Activation("relu")),
                     data_format="channels_last",
                     **kwargs):
    """
    1x1 version of the mixed convolution block: `kernel_count` parallel
    pointwise convolutions over equal channel splits, no padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_count : int
        Kernel count.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str, or None, default nn.Activation("relu")
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    unit_kernels = [1] * kernel_count
    no_padding = [0] * kernel_count
    return MixConvBlock(
        kernel_size=unit_kernels,
        padding=no_padding,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format,
        **kwargs)
class MixUnit(nn.Layer):
    """
    MixNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_kernel_count : int
        Expansion convolution kernel count for each unit.
    conv1_kernel_count : int
        Conv1 kernel count for each unit.
    conv2_kernel_count : int
        Conv2 kernel count for each unit.
    exp_factor : int
        Expansion factor for each unit.
    se_factor : int
        SE reduction factor for each unit.
    activation : str
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 exp_kernel_count,
                 conv1_kernel_count,
                 conv2_kernel_count,
                 exp_factor,
                 se_factor,
                 activation,
                 data_format="channels_last",
                 **kwargs):
        super(MixUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        assert (se_factor >= 0)
        # Residual connection only when the shape is unchanged.
        self.residual = (in_channels == out_channels) and (strides == 1)
        self.use_se = se_factor > 0
        # Inverted-residual style: expand -> depthwise -> (SE) -> project.
        mid_channels = exp_factor * in_channels
        self.use_exp_conv = exp_factor > 1
        if self.use_exp_conv:
            # A kernel count of 1 means a plain conv instead of a mixed one.
            if exp_kernel_count == 1:
                self.exp_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    activation=activation,
                    data_format=data_format,
                    name="exp_conv")
            else:
                self.exp_conv = mixconv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    kernel_count=exp_kernel_count,
                    activation=activation,
                    data_format=data_format,
                    name="exp_conv")
        if conv1_kernel_count == 1:
            self.conv1 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=activation,
                data_format=data_format,
                name="conv1")
        else:
            # Mixed depthwise conv with kernel sizes 3, 5, 7, ...
            self.conv1 = MixConvBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)],
                strides=strides,
                padding=[1 + i for i in range(conv1_kernel_count)],
                groups=mid_channels,
                activation=activation,
                data_format=data_format,
                name="conv1")
        if self.use_se:
            self.se = SEBlock(
                channels=mid_channels,
                reduction=(exp_factor * se_factor),
                round_mid=False,
                mid_activation=activation,
                data_format=data_format,
                name="se")
        if conv2_kernel_count == 1:
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                activation=None,
                data_format=data_format,
                name="conv2")
        else:
            self.conv2 = mixconv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_count=conv2_kernel_count,
                activation=None,
                data_format=data_format,
                name="conv2")
    def call(self, x, training=None):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x, training=training)
        x = self.conv1(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x, training=training)
        if self.residual:
            x = x + identity
        return x
class MixInitBlock(nn.Layer):
    """
    MixNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(MixInitBlock, self).__init__(**kwargs)
        # Stride-2 stem conv followed by one non-expanding, non-SE MixUnit.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            data_format=data_format,
            name="conv1")
        self.conv2 = MixUnit(
            in_channels=out_channels,
            out_channels=out_channels,
            strides=1,
            exp_kernel_count=1,
            conv1_kernel_count=1,
            conv2_kernel_count=1,
            exp_factor=1,
            se_factor=0,
            activation="relu",
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class MixNet(tf.keras.Model):
    """
    MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    exp_kernel_counts : list of list of int
        Expansion convolution kernel count for each unit.
    conv1_kernel_counts : list of list of int
        Conv1 kernel count for each unit.
    conv2_kernel_counts : list of list of int
        Conv2 kernel count for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 exp_kernel_counts,
                 conv1_kernel_counts,
                 conv2_kernel_counts,
                 exp_factors,
                 se_factors,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MixNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(MixInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample with the first unit of stages 1-3; the last stage
                # downsamples in its middle unit instead.
                strides = 2 if ((j == 0) and (i != 3)) or \
                               ((j == len(channels_per_stage) // 2) and (i == 3)) else 1
                exp_kernel_count = exp_kernel_counts[i][j]
                conv1_kernel_count = conv1_kernel_counts[i][j]
                conv2_kernel_count = conv2_kernel_counts[i][j]
                exp_factor = exp_factors[i][j]
                se_factor = se_factors[i][j]
                # First stage uses ReLU, later stages Swish (per the paper).
                activation = "relu" if i == 0 else "swish"
                stage.add(MixUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    exp_kernel_count=exp_kernel_count,
                    conv1_kernel_count=conv1_kernel_count,
                    conv2_kernel_count=conv2_kernel_count,
                    exp_factor=exp_factor,
                    se_factor=se_factor,
                    activation=activation,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Fix: the original passed `activation` here, silently relying on the
        # loop variable leaking out of the stage loop (always "swish" for the
        # non-first stages, NameError if `channels` were empty). Be explicit.
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            activation="swish",
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # Pool size 7 assumes the default 224x224 input (5 stride-2 levels).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_mixnet(version,
               width_scale,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create MixNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MixNet ('s' or 'm').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if version == "s":
        init_block_channels = 16
        channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]]
        conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]]
    elif version == "m":
        init_block_channels = 24
        channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]]
        conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]]
    else:
        raise ValueError("Unsupported MixNet version {}".format(version))
    final_block_channels = 1536
    # Width scaling applies to stage and init-block channels only; the final
    # 1536-channel block is kept fixed.
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
    net = MixNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        exp_kernel_counts=exp_kernel_counts,
        conv1_kernel_counts=conv1_kernel_counts,
        conv2_kernel_counts=conv2_kernel_counts,
        exp_factors=exp_factors,
        se_factors=se_factors,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build with a dummy input shape so weights exist before loading.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def mixnet_s(**kwargs):
    """
    Build the small (S) MixNet variant from 'MixConv: Mixed Depthwise Convolutional Kernels,'
    https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(version="s", width_scale=1.0, model_name="mixnet_s")
    return get_mixnet(**config, **kwargs)
def mixnet_m(**kwargs):
    """
    Build the medium (M) MixNet variant from 'MixConv: Mixed Depthwise Convolutional Kernels,'
    https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(version="m", width_scale=1.0, model_name="mixnet_m")
    return get_mixnet(**config, **kwargs)
def mixnet_l(**kwargs):
    """
    Build the large (L) MixNet variant from 'MixConv: Mixed Depthwise Convolutional Kernels,'
    https://arxiv.org/abs/1907.09595. MixNet-L is the M architecture widened by a 1.3 scale factor.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    config = dict(version="m", width_scale=1.3, model_name="mixnet_l")
    return get_mixnet(**config, **kwargs)
def _test():
    """Smoke-test the MixNet variants: check output shape and trainable-parameter counts."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    expected_weights = {
        mixnet_s: 4134606,
        mixnet_m: 5014382,
        mixnet_l: 7329252,
    }
    for model in (mixnet_s, mixnet_m, mixnet_l):
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weights[model])
| 23,110 | 34.886646 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/dabnet.py | """
DABNet for image segmentation, implemented in TensorFlow.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential, SimpleSequential, is_channels_first, get_im_size, PReLU2, MaxPool2d, AvgPool2d, get_channel_axis
class DwaConvBlock(nn.Layer):
    """
    Depthwise asymmetric separable convolution block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(DwaConvBlock, self).__init__(**kwargs)
        # NOTE(review): `strides` is passed to both convolutions, so strides > 1 would
        # downsample twice; all in-file callers use the default strides=1 — confirm intent.
        # Vertical (kernel_size x 1) depthwise convolution.
        self.conv1 = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(kernel_size, 1),
            strides=strides,
            padding=(padding, 0),
            dilation=(dilation, 1),
            groups=channels,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv1")
        # Horizontal (1 x kernel_size) depthwise convolution.
        self.conv2 = ConvBlock(
            in_channels=channels,
            out_channels=channels,
            kernel_size=(1, kernel_size),
            strides=strides,
            padding=(0, padding),
            dilation=(1, dilation),
            groups=channels,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Apply the vertical then the horizontal depthwise convolution."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
def dwa_conv3x3_block(channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      use_bias=False,
                      use_bn=True,
                      bn_eps=1e-5,
                      activation="relu",
                      data_format="channels_last",
                      **kwargs):
    """
    Convenience constructor for a DwaConvBlock with a fixed 3x3 window.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Pin the kernel size and forward everything else unchanged.
    fixed = dict(
        channels=channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format)
    return DwaConvBlock(**fixed, **kwargs)
class DABBlock(nn.Layer):
    """
    DABNet specific base block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for a dilated branch in the unit.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 dilation,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(DABBlock, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # Internal bottleneck works at half the channel count.
        mid_channels = channels // 2
        self.norm_activ1 = NormActivation(
            in_channels=channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ1")
        self.conv1 = conv3x3_block(
            in_channels=channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv1")
        # Two parallel depthwise-asymmetric branches: one plain, one dilated.
        self.branches = Concurrent(
            stack=True,
            data_format=data_format,
            name="branches")
        self.branches.add(dwa_conv3x3_block(
            channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="branches1"))
        self.branches.add(dwa_conv3x3_block(
            channels=mid_channels,
            padding=dilation,
            dilation=dilation,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="branches2"))
        self.norm_activ2 = NormActivation(
            in_channels=mid_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ2")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=channels,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Pre-activation bottleneck with two parallel branches and a residual connection."""
        identity = x
        x = self.norm_activ1(x, training=training)
        x = self.conv1(x, training=training)
        x = self.branches(x, training=training)
        # Collapse the stacked branch outputs by summing over the stacking axis.
        x = tf.math.reduce_sum(x, axis=self.axis)
        x = self.norm_activ2(x, training=training)
        x = self.conv2(x)
        # Residual shortcut back to the block input.
        x = x + identity
        return x
class DownBlock(nn.Layer):
    """
    DABNet specific downsample block for the main branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(DownBlock, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # When widening, the strided conv produces only the extra channels; the
        # remaining in_channels come from max-pooling the input and concatenating.
        self.expand = (in_channels < out_channels)
        mid_channels = out_channels - in_channels if self.expand else out_channels
        self.conv = conv3x3(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            data_format=data_format,
            name="conv")
        if self.expand:
            self.pool = MaxPool2d(
                pool_size=2,
                strides=2,
                data_format=data_format,
                name="pool")
        self.norm_activ = NormActivation(
            in_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ")
    def call(self, x, training=None):
        """Downsample by 2x via strided conv (optionally concatenated with max-pooled input)."""
        y = self.conv(x)
        if self.expand:
            z = self.pool(x)
            y = tf.concat([y, z], axis=self.axis)
        y = self.norm_activ(y, training=training)
        return y
class DABUnit(nn.Layer):
    """
    DABNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilations : list of int
        Dilations for blocks.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilations,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(DABUnit, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # Half the output channels come from the downsample path, half from the blocks.
        mid_channels = out_channels // 2
        self.down = DownBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="down")
        # One DABBlock per dilation value.
        self.blocks = SimpleSequential(name="blocks")
        for i, dilation in enumerate(dilations):
            self.blocks.add(DABBlock(
                channels=mid_channels,
                dilation=dilation,
                bn_eps=bn_eps,
                data_format=data_format,
                name="block{}".format(i + 1)))
    def call(self, x, training=None):
        """Downsample, refine through the DAB blocks, and concatenate both results."""
        x = self.down(x, training=training)
        y = self.blocks(x, training=training)
        # Concatenate refined features with the downsample output.
        x = tf.concat([y, x], axis=self.axis)
        return x
class DABStage(nn.Layer):
    """
    DABNet stage.
    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    dilations : list of int
        Dilations for blocks.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 dilations,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(DABStage, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # A stage with no dilations has no DABUnit; it only merges the x branch.
        self.use_unit = (len(dilations) > 0)
        self.x_down = AvgPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="x_down")
        if self.use_unit:
            self.unit = DABUnit(
                in_channels=y_in_channels,
                out_channels=(y_out_channels - x_channels),
                dilations=dilations,
                bn_eps=bn_eps,
                data_format=data_format,
                name="unit")
        self.norm_activ = NormActivation(
            in_channels=y_out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(y_out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ")
    def call(self, y, x, training=None):
        """Average-pool the auxiliary x branch, optionally run the unit on y, then fuse both."""
        x = self.x_down(x)
        if self.use_unit:
            y = self.unit(y, training=training)
        y = tf.concat([y, x], axis=self.axis)
        y = self.norm_activ(y, training=training)
        return y, x
class DABInitBlock(nn.Layer):
    """
    DABNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(DABInitBlock, self).__init__(**kwargs)
        # Only the first conv is strided; the stem downsamples by 2x overall.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        """Apply the three stem convolutions in sequence."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class DABNet(tf.keras.Model):
    """
    DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1907.11357.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilations for blocks.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(DABNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Output is upsampled 8x, so the input must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        self.data_format = data_format
        # Dual-path backbone: y carries the features, x carries a pooled copy of the input.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=0,
            name="features")
        self.features.add(DABInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        y_in_channels = init_block_channels
        for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
            self.features.add(DABStage(
                x_channels=in_channels,
                y_in_channels=y_in_channels,
                y_out_channels=y_out_channels,
                dilations=dilations_i,
                bn_eps=bn_eps,
                data_format=data_format,
                name="stage{}".format(i + 1)))
            y_in_channels = y_out_channels
        # 1x1 conv mapping features to per-class score maps.
        self.classifier = conv1x1(
            in_channels=y_in_channels,
            out_channels=classes,
            data_format=data_format,
            name="classifier")
        # Restore the 1/8-resolution score maps to input resolution.
        self.up = InterpolationBlock(
            scale_factor=8,
            data_format=data_format,
            name="up")
    def call(self, x, training=None):
        """Run the dual-path backbone, classify per pixel, and upsample to input size."""
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        # Both paths start from the raw input image.
        y = self.features(x, x, training=training)
        y = self.classifier(y)
        y = self.up(y, size=in_size)
        return y
def get_dabnet(model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Construct a DABNet instance and optionally load pretrained weights.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture hyperparameters for the published DABNet.
    net = DABNet(
        channels=[35, 131, 259],
        init_block_channels=32,
        dilations=[[], [2, 2, 2], [4, 4, 8, 8, 16, 16]],
        bn_eps=1e-3,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # The model must be built before weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_weights(
            filepath=weights_path,
            by_name=True,
            skip_mismatch=True)
    return net
def dabnet_cityscapes(classes=19, **kwargs):
    """
    Build DABNet configured for the Cityscapes benchmark ('DABNet: Depth-wise Asymmetric
    Bottleneck for Real-time Semantic Segmentation,' https://arxiv.org/abs/1907.11357).
    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_dabnet(model_name="dabnet_cityscapes", classes=classes, **kwargs)
def _test():
    """Smoke-test DABNet: forward pass shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    in_size = (1024, 2048)
    classes = 19
    for model in [dabnet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, data_format=data_format)
        batch = 4
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
            expected = (batch, classes, in_size[0], in_size[1])
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
            expected = (batch, in_size[0], in_size[1], classes)
        y = net(x)
        assert (tuple(y.shape.as_list()) == expected)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != dabnet_cityscapes or weight_count == 756643)
| 20,630 | 31.592417 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/cgnet.py | """
CGNet for image segmentation, implemented in TensorFlow.
Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
"""
__all__ = ['CGNet', 'cgnet_cityscapes']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\
DualPathSequential, InterpolationBlock, SimpleSequential, is_channels_first, get_im_size, PReLU2, AvgPool2d,\
get_channel_axis
class CGBlock(nn.Layer):
    """
    CGNet block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    down : bool
        Whether to downsample.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation,
                 se_reduction,
                 down,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(CGBlock, self).__init__(**kwargs)
        self.down = down
        # Downsampling blocks widen first and project back; regular blocks bottleneck to half.
        if self.down:
            mid1_channels = out_channels
            mid2_channels = 2 * out_channels
        else:
            mid1_channels = out_channels // 2
            mid2_channels = out_channels
        if self.down:
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_eps=bn_eps,
                activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
                data_format=data_format,
                name="conv1")
        else:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid1_channels,
                bn_eps=bn_eps,
                activation=(lambda: PReLU2(mid1_channels, data_format=data_format, name="activ")),
                data_format=data_format,
                name="conv1")
        # Two parallel depthwise branches: local (plain) and surrounding (dilated) context.
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(depthwise_conv3x3(
            channels=mid1_channels,
            data_format=data_format,
            name="branches1"))
        self.branches.add(depthwise_conv3x3(
            channels=mid1_channels,
            padding=dilation,
            dilation=dilation,
            data_format=data_format,
            name="branches2"))
        self.norm_activ = NormActivation(
            in_channels=mid2_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(mid2_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ")
        if self.down:
            self.conv2 = conv1x1(
                in_channels=mid2_channels,
                out_channels=out_channels,
                data_format=data_format,
                name="conv2")
        self.se = SEBlock(
            channels=out_channels,
            reduction=se_reduction,
            use_conv=False,
            data_format=data_format,
            name="se")
    def call(self, x, training=None):
        """Context-guided block: parallel branches, SE gating, residual add when not downsampling."""
        if not self.down:
            identity = x
        x = self.conv1(x, training=training)
        x = self.branches(x, training=training)
        x = self.norm_activ(x, training=training)
        if self.down:
            x = self.conv2(x, training=training)
        x = self.se(x, training=training)
        if not self.down:
            x += identity
        return x
class CGUnit(nn.Layer):
    """
    CGNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    layers : int
        Number of layers.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(CGUnit, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # Half the output channels from the downsample block, half from the stack of blocks.
        mid_channels = out_channels // 2
        self.down = CGBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            dilation=dilation,
            se_reduction=se_reduction,
            down=True,
            bn_eps=bn_eps,
            data_format=data_format,
            name="down")
        self.blocks = SimpleSequential(name="blocks")
        for i in range(layers - 1):
            self.blocks.add(CGBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                dilation=dilation,
                se_reduction=se_reduction,
                down=False,
                bn_eps=bn_eps,
                data_format=data_format,
                name="block{}".format(i + 1)))
    def call(self, x, training=None):
        """Downsample, refine, and concatenate refined with downsampled features."""
        x = self.down(x, training=training)
        y = self.blocks(x, training=training)
        x = tf.concat([y, x], axis=self.axis)  # NB: This differs from the original implementation.
        return x
class CGStage(nn.Layer):
    """
    CGNet stage.
    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    layers : int
        Number of layers in the unit.
    dilation : int
        Dilation for blocks.
    se_reduction : int
        SE-block reduction value for blocks.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(CGStage, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)
        # A stage may omit the x branch (x_channels == 0) and/or the unit (layers == 0).
        self.use_x = (x_channels > 0)
        self.use_unit = (layers > 0)
        if self.use_x:
            self.x_down = AvgPool2d(
                pool_size=3,
                strides=2,
                padding=1,
                data_format=data_format,
                name="x_down")
        if self.use_unit:
            self.unit = CGUnit(
                in_channels=y_in_channels,
                out_channels=(y_out_channels - x_channels),
                layers=layers,
                dilation=dilation,
                se_reduction=se_reduction,
                bn_eps=bn_eps,
                data_format=data_format,
                name="unit")
        self.norm_activ = NormActivation(
            in_channels=y_out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(y_out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="norm_activ")
    def call(self, y, x=None, training=None):
        """Optionally run the unit on y, optionally fuse the pooled x branch, then normalize."""
        if self.use_unit:
            y = self.unit(y, training=training)
        if self.use_x:
            x = self.x_down(x)
            y = tf.concat([y, x], axis=self.axis)
        y = self.norm_activ(y, training=training)
        return y, x
class CGInitBlock(nn.Layer):
    """
    CGNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(CGInitBlock, self).__init__(**kwargs)
        # Only the first conv is strided; the stem downsamples by 2x overall.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv2")
        self.conv3 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=(lambda: PReLU2(out_channels, data_format=data_format, name="activ")),
            data_format=data_format,
            name="conv3")
    def call(self, x, training=None):
        """Apply the three stem convolutions in sequence."""
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        x = self.conv3(x, training=training)
        return x
class CGNet(tf.keras.Model):
    """
    CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
    https://arxiv.org/abs/1811.08201.
    Parameters:
    ----------
    layers : list of int
        Number of layers for each unit.
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of int
        Dilations for each unit.
    se_reductions : list of int
        SE-block reduction value for each unit.
    cut_x : list of int
        Whether to concatenate with x-branch for each unit.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 layers,
                 channels,
                 init_block_channels,
                 dilations,
                 se_reductions,
                 cut_x,
                 bn_eps=1e-5,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 data_format="channels_last",
                 **kwargs):
        super(CGNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Output is upsampled 8x, so the input must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        self.data_format = data_format
        # Dual-path backbone: y carries the features, x carries a pooled copy of the input.
        self.features = DualPathSequential(
            return_two=False,
            first_ordinals=1,
            last_ordinals=0,
            name="features")
        self.features.add(CGInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        y_in_channels = init_block_channels
        for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)):
            self.features.add(CGStage(
                x_channels=in_channels if cut_x[i] == 1 else 0,
                y_in_channels=y_in_channels,
                y_out_channels=y_out_channels,
                layers=layers_i,
                dilation=dilations[i],
                se_reduction=se_reductions[i],
                bn_eps=bn_eps,
                data_format=data_format,
                name="stage{}".format(i + 1)))
            y_in_channels = y_out_channels
        # 1x1 conv mapping features to per-class score maps.
        self.classifier = conv1x1(
            in_channels=y_in_channels,
            out_channels=classes,
            data_format=data_format,
            name="classifier")
        # Restore the 1/8-resolution score maps to input resolution.
        self.up = InterpolationBlock(
            scale_factor=8,
            data_format=data_format,
            name="up")
    def call(self, x, training=None):
        """Run the dual-path backbone, classify per pixel, and upsample to input size."""
        in_size = self.in_size if self.fixed_size else get_im_size(x, data_format=self.data_format)
        # Both paths start from the raw input image.
        y = self.features(x, x, training=training)
        y = self.classifier(y)
        y = self.up(y, size=in_size)
        return y
def get_cgnet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Construct a CGNet instance and optionally load pretrained weights.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture hyperparameters for the published CGNet.
    net = CGNet(
        layers=[0, 3, 21],
        channels=[35, 131, 256],
        init_block_channels=32,
        dilations=[0, 2, 4],
        se_reductions=[0, 8, 16],
        cut_x=[1, 1, 0],
        bn_eps=1e-3,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # The model must be built before weights can be restored.
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_weights(
            filepath=weights_path,
            by_name=True,
            skip_mismatch=True)
    return net
def cgnet_cityscapes(classes=19, **kwargs):
    """
    Build CGNet configured for the Cityscapes benchmark ('CGNet: A Light-weight Context
    Guided Network for Semantic Segmentation,' https://arxiv.org/abs/1811.08201).
    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_cgnet(model_name="cgnet_cityscapes", classes=classes, **kwargs)
def _test():
    """Smoke-test CGNet: forward pass shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    in_size = (1024, 2048)
    classes = 19
    for model in [cgnet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, data_format=data_format)
        batch = 4
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, in_size[0], in_size[1]))
            expected = (batch, classes, in_size[0], in_size[1])
        else:
            x = tf.random.normal((batch, in_size[0], in_size[1], 3))
            expected = (batch, in_size[0], in_size[1], classes)
        y = net(x)
        assert (tuple(y.shape.as_list()) == expected)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != cgnet_cityscapes or weight_count == 496306)
| 16,751 | 31.528155 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/fbnet.py | """
FBNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
https://arxiv.org/abs/1812.03443.
"""
__all__ = ['FBNet', 'fbnet_cb']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SimpleSequential, flatten,\
is_channels_first
class FBNetUnit(nn.Layer):
    """
    FBNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_eps : float
        Small float added to variance in Batch norm.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int
        Expansion factor for each unit.
    activation : str, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_eps,
                 use_kernel3,
                 exp_factor,
                 activation="relu",
                 data_format="channels_last",
                 **kwargs):
        super(FBNetUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        # Residual shortcut only when the shape is preserved.
        self.residual = (in_channels == out_channels) and (strides == 1)
        # NOTE(review): hard-coded True, so the 1x1 expansion conv is built even
        # when exp_factor == 1 — confirm this matches the reference implementation.
        self.use_exp_conv = True
        mid_channels = exp_factor * in_channels
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_eps=bn_eps,
                activation=activation,
                data_format=data_format,
                name="exp_conv")
        if use_kernel3:
            self.conv1 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_eps=bn_eps,
                activation=activation,
                data_format=data_format,
                name="conv1")
        else:
            self.conv1 = dwconv5x5_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_eps=bn_eps,
                activation=activation,
                data_format=data_format,
                name="conv1")
        # Linear (no activation) projection back to out_channels.
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_eps=bn_eps,
            activation=None,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        """Inverted-bottleneck forward pass: expand, depthwise conv, project, optional residual."""
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x, training=training)
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        if self.residual:
            x = x + identity
        return x
class FBNetInitBlock(nn.Layer):
    """
    FBNet specific initial block: a strided 3x3 stem conv followed by one non-expanding unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_eps,
                 data_format="channels_last",
                 **kwargs):
        super(FBNetInitBlock, self).__init__(**kwargs)
        # Stem conv halves the spatial resolution (strides=2).
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=2,
            bn_eps=bn_eps,
            data_format=data_format,
            name="conv1")
        # First unit keeps resolution and channel count (exp_factor=1 -> no expansion).
        self.conv2 = FBNetUnit(
            in_channels=out_channels,
            out_channels=out_channels,
            strides=1,
            bn_eps=bn_eps,
            use_kernel3=True,
            exp_factor=1,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.conv2(x, training=training)
        return x
class FBNet(tf.keras.Model):
    """
    FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
    https://arxiv.org/abs/1812.03443.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(FBNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        self.features.add(FBNetInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Each stage downsamples once via its first unit.
                strides = 2 if (j == 0) else 1
                use_kernel3 = kernels3[i][j] == 1
                exp_factor = exp_factors[i][j]
                stage.add(FBNetUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_eps=bn_eps,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        # Final 1x1 conv widens features before global pooling.
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bn_eps=bn_eps,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # 7x7 average pool matches the 224x224 input after 5 downsamplings (224 / 32 = 7).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Flatten the pooled (1x1 spatial) feature map before the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_fbnet(version,
              bn_eps=1e-5,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create FBNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of FBNet (only 'c' is supported).
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if version == "c":
        init_block_channels = 16
        final_block_channels = 1984
        # Searched FBNet-C architecture: per-stage output channels, kernel flags
        # (1 -> 3x3, 0 -> 5x5) and expansion factors.
        channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]]
        kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]
        exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]]
    else:
        raise ValueError("Unsupported FBNet version {}".format(version))
    net = FBNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        bn_eps=bn_eps,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch of 1 so that variables exist before load_weights.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def fbnet_cb(**kwargs):
    """
    Build the FBNet-Cb network (the searched FBNet-C architecture with bn_eps=1e-3) from
    'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture
    Search,' https://arxiv.org/abs/1812.03443.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_fbnet(
        version="c",
        bn_eps=1e-3,
        model_name="fbnet_cb",
        **kwargs)
    return net
def _test():
    """Smoke-test the FBNet constructors: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    for model in [fbnet_cb]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            input_shape = (batch, 3, 224, 224)
        else:
            input_shape = (batch, 224, 224, 3)
        y = net(tf.random.normal(input_shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert model != fbnet_cb or weight_count == 5572200
if __name__ == "__main__":
    _test()
| 11,383 | 32.581121 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/visemenet.py | """
VisemeNet for speech-driven facial animation, implemented in TensorFlow.
Original paper: 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488.
"""
__all__ = ['VisemeNet', 'visemenet20']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import DenseBlock, SimpleSequential
class VisemeDenseBranch(tf.keras.Model):
    """
    VisemeNet dense branch: a stack of Dense+BN blocks followed by a plain Dense head.
    The `call` returns both the head output and the last hidden features, so callers
    can reuse the penultimate representation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 data_format="channels_last",
                 **kwargs):
        super(VisemeDenseBranch, self).__init__(**kwargs)
        self.branch = SimpleSequential(name="branch")
        # All entries except the last are hidden Dense+BN blocks; the last entry is
        # the width of the final (bare) Dense layer.
        for i, out_channels in enumerate(out_channels_list[:-1]):
            self.branch.add(DenseBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                use_bn=True,
                data_format=data_format,
                name="block{}".format(i + 1)))
            in_channels = out_channels
        self.final_fc = nn.Dense(
            units=out_channels_list[-1],
            input_dim=in_channels,
            name="final_fc")
    def call(self, x, training=None):
        x = self.branch(x, training=training)
        y = self.final_fc(x)
        # Return (head output, pre-head hidden features).
        return y, x
class VisemeRnnBranch(nn.Layer):
    """
    VisemeNet RNN branch: stacked LSTM cells followed by a dense branch head.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    rnn_num_layers : int
        Number of RNN layers.
    dropout_rate : float
        Dropout rate.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 rnn_num_layers,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(VisemeRnnBranch, self).__init__(**kwargs)
        # `in_channels` is only validated, not used: Keras infers the RNN input size
        # from the first call.
        assert (in_channels is not None)
        self.rnn = nn.RNN([nn.LSTMCell(
            units=out_channels_list[0],
            dropout=dropout_rate,
            name="rnn{}".format(i + 1)
        ) for i in range(rnn_num_layers)])
        self.fc_branch = VisemeDenseBranch(
            in_channels=out_channels_list[0],
            out_channels_list=out_channels_list[1:],
            data_format=data_format,
            name="fc_branch")
    def call(self, x, training=None):
        # With the Keras default return_sequences=False the RNN yields only the final
        # time-step output, hence the slicing below is left disabled.
        x = self.rnn(x, training=training)
        # x = x[:, -1, :]
        # Only the head output is propagated; hidden features are discarded.
        y, _ = self.fc_branch(x, training=training)
        return y
class VisemeNet(tf.keras.Model):
    """
    VisemeNet model from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488.
    Stage 1 (RNN + dense branches) predicts landmarks and phoneme features from windowed
    audio; stage 2 (three RNN branches) predicts viseme class scores, regression targets
    and JALI parameters from sliding windows over the stage-1 features.
    Parameters:
    ----------
    audio_features : int, default 195
        Number of audio features (characters/sounds).
    audio_window_size : int, default 8
        Size of audio window (for time related audio features).
    stage2_window_size : int, default 64
        Size of window for stage #2.
    num_face_ids : int, default 76
        Number of face IDs.
    num_landmarks : int, default 76
        Number of landmarks.
    num_phonemes : int, default 21
        Number of phonemes.
    num_visemes : int, default 20
        Number of visemes.
    dropout_rate : float, default 0.5
        Dropout rate for RNNs.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 audio_features=195,
                 audio_window_size=8,
                 stage2_window_size=64,
                 num_face_ids=76,
                 num_landmarks=76,
                 num_phonemes=21,
                 num_visemes=20,
                 dropout_rate=0.5,
                 data_format="channels_last",
                 **kwargs):
        super(VisemeNet, self).__init__(**kwargs)
        stage1_rnn_hidden_size = 256
        stage1_fc_mid_channels = 256
        # Stage-2 input width: concatenated stage-1 features, re-windowed from
        # stage2_window_size steps down to audio_window_size steps.
        stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \
            stage2_window_size // audio_window_size
        self.audio_window_size = audio_window_size
        self.stage2_window_size = stage2_window_size
        self.stage1_rnn = nn.RNN([nn.LSTMCell(
            units=stage1_rnn_hidden_size,
            dropout=dropout_rate,
            name="stage1_rnn{}".format(i + 1)
        ) for i in range(3)])
        # Landmark regression branch (stage 1).
        self.lm_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_landmarks],
            data_format=data_format,
            name="lm_branch")
        # Phoneme classification branch (stage 1).
        self.ph_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_phonemes],
            data_format=data_format,
            name="ph_branch")
        # Stage-2 heads: viseme classification, viseme regression, JALI (2 outputs).
        self.cls_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, num_visemes],
            rnn_num_layers=1,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="cls_branch")
        self.reg_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, 100, num_visemes],
            rnn_num_layers=3,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="reg_branch")
        self.jali_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[128, 200, 2],
            rnn_num_layers=3,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="jali_branch")
    def call(self, x, pid, training=None):
        # Stage 1: encode the audio window, then append the (float-cast) face-ID vector.
        y = self.stage1_rnn(x, training=training)
        # y = y[:, -1, :]
        y = tf.concat([y, tf.cast(pid, tf.float32)], axis=1)
        # Landmarks are predicted as offsets relative to the face-ID vector
        # (presumably a per-identity rest pose -- confirm against the caller).
        lm, _ = self.lm_branch(y, training=training)
        lm += tf.cast(pid, tf.float32)
        # NOTE(review): `ph` (phoneme logits) is computed but unused here; only the
        # hidden features `ph1` feed stage 2.
        ph, ph1 = self.ph_branch(y, training=training)
        z = tf.concat([lm, ph1], axis=1)
        # Append the raw audio features of the central frame of the window.
        z2 = tf.concat([z, x[:, self.audio_window_size // 2, :]], axis=1)
        n_net2_input = z2.shape[1]
        # Zero-pad half a stage-2 window at the front, then build overlapping
        # stage2_window_size-long windows reshaped to audio_window_size time steps.
        z2 = tf.concat([tf.zeros((self.stage2_window_size // 2, n_net2_input)), z2], axis=0)
        z = tf.stack(
            [tf.reshape(
                z2[i:i + self.stage2_window_size],
                shape=(self.audio_window_size, n_net2_input * self.stage2_window_size // self.audio_window_size))
             for i in range(z2.shape[0] - self.stage2_window_size)],
            axis=0)
        cls = self.cls_branch(z, training=training)
        reg = self.reg_branch(z, training=training)
        jali = self.jali_branch(z, training=training)
        return cls, reg, jali
def get_visemenet(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create VisemeNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = VisemeNet(
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # NOTE(review): this pretrained branch looks copied from the image-classification
        # loaders -- `VisemeNet` defines neither `in_size` nor `data_format` attributes,
        # so the two lines below would raise AttributeError when pretrained=True.
        # Confirm the intended build shape for the audio input before relying on this path.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def visemenet20(**kwargs):
    """
    Build the 20-viseme VisemeNet model (without co-articulation rules) from
    'VisemeNet: Audio-Driven Animator-Centric Speech Animation,'
    https://arxiv.org/abs/1805.09488.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    model = get_visemenet(model_name="visemenet20", **kwargs)
    return model
def _test():
    """Smoke-test the VisemeNet constructor: output shapes and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    for model in [visemenet20]:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 34
        audio_window_size = 8
        audio_features = 195
        num_face_ids = 76
        num_visemes = 20
        audio = tf.random.normal((batch, audio_window_size, audio_features))
        face_ids = tf.fill(dims=(batch, num_face_ids), value=3)
        cls, reg, jali = net(audio, face_ids)
        assert cls.shape[0] == reg.shape[0] == jali.shape[0]
        assert cls.shape[-1] == reg.shape[-1] == num_visemes
        assert jali.shape[-1] == 2
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        # assert (model != visemenet20 or weight_count == 14574303)
        assert model != visemenet20 or weight_count == 14565599
        print(net.summary())
if __name__ == "__main__":
    _test()
| 10,166 | 33.11745 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/mobilenetv3.py | """
MobileNetV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
__all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4',
'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2',
'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\
HSwish, SimpleSequential, flatten, is_channels_first
class MobileNetV3Unit(nn.Layer):
    """
    MobileNetV3 unit: inverted-residual block with optional SE module.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    exp_channels : int
        Number of middle (expanded) channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    activation : str
        Activation function or name of activation function.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 exp_channels,
                 strides,
                 use_kernel3,
                 activation,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3Unit, self).__init__(**kwargs)
        assert (exp_channels >= out_channels)
        # Identity shortcut only when spatial size and channel count are preserved.
        self.residual = (in_channels == out_channels) and (strides == 1)
        self.use_se = use_se
        # The 1x1 expansion conv is skipped when no actual expansion happens.
        self.use_exp_conv = exp_channels != out_channels
        mid_channels = exp_channels
        if self.use_exp_conv:
            self.exp_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                activation=activation,
                data_format=data_format,
                name="exp_conv")
        if use_kernel3:
            self.conv1 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=activation,
                data_format=data_format,
                name="conv1")
        else:
            self.conv1 = dwconv5x5_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                activation=activation,
                data_format=data_format,
                name="conv1")
        if self.use_se:
            # Squeeze-and-excitation with hard-sigmoid gating, as in the paper.
            self.se = SEBlock(
                channels=mid_channels,
                reduction=4,
                round_mid=True,
                out_activation="hsigmoid",
                data_format=data_format,
                name="se")
        # Linear 1x1 projection (no activation).
        self.conv2 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x, training=training)
        x = self.conv1(x, training=training)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x, training=training)
        if self.residual:
            x = x + identity
        return x
class MobileNetV3FinalBlock(nn.Layer):
    """
    MobileNetV3 final block: 1x1 conv with H-Swish activation, optionally followed by SE.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_se,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3FinalBlock, self).__init__(**kwargs)
        self.use_se = use_se
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation="hswish",
            data_format=data_format,
            name="conv")
        if self.use_se:
            self.se = SEBlock(
                channels=out_channels,
                reduction=4,
                round_mid=True,
                out_activation="hsigmoid",
                data_format=data_format,
                name="se")
    def call(self, x, training=None):
        x = self.conv(x, training=training)
        if self.use_se:
            x = self.se(x)
        return x
class MobileNetV3Classifier(nn.Layer):
    """
    MobileNetV3 classifier head, implemented with 1x1 convolutions (it operates on the
    pooled 1x1 spatial feature map): conv -> H-Swish -> optional dropout -> conv.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3Classifier, self).__init__(**kwargs)
        # Dropout is only instantiated for a non-zero rate.
        self.use_dropout = (dropout_rate != 0.0)
        self.conv1 = conv1x1(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.activ = HSwish()
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")
        # Final conv carries the bias and produces the class logits.
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2")
    def call(self, x, training=None):
        x = self.conv1(x)
        x = self.activ(x)
        if self.use_dropout:
            # Dropout is active only in training mode.
            x = self.dropout(x, training=training)
        x = self.conv2(x)
        return x
class MobileNetV3(tf.keras.Model):
    """
    MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    exp_channels : list of list of int
        Number of middle (expanded) channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    use_relu : list of list of int/bool
        Using ReLU activation flag for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    final_use_se : bool
        Whether to use SE-module in the final block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        self.features = SimpleSequential(name="features")
        # Stem: strided 3x3 conv with H-Swish.
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            activation="hswish",
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = exp_channels[i][j]
                # First unit of each stage downsamples, except stage 0 unless
                # first_stride is set (small variant).
                strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                use_kernel3 = kernels3[i][j] == 1
                activation = "relu" if use_relu[i][j] == 1 else "hswish"
                use_se_flag = use_se[i][j] == 1
                stage.add(MobileNetV3Unit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    exp_channels=exp_channels_ij,
                    use_kernel3=use_kernel3,
                    strides=strides,
                    activation=activation,
                    use_se=use_se_flag,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(MobileNetV3FinalBlock(
            in_channels=in_channels,
            out_channels=final_block_channels,
            use_se=final_use_se,
            data_format=data_format,
            name="final_block"))
        in_channels = final_block_channels
        # 7x7 average pool matches a 224x224 input after 5 downsamplings.
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = MobileNetV3Classifier(
            in_channels=in_channels,
            out_channels=classes,
            mid_channels=classifier_mid_channels,
            dropout_rate=0.2,
            data_format=data_format,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Classifier is convolutional, so flatten after it (spatial size is 1x1 here).
        x = self.output1(x, training=training)
        x = flatten(x, self.data_format)
        return x
def get_mobilenetv3(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV3 model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MobileNetV3 ('small' or 'large').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if version == "small":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
        exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]]
        first_stride = True
        final_block_channels = 576
    elif version == "large":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]]
        kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]]
        first_stride = False
        final_block_channels = 960
    else:
        raise ValueError("Unsupported MobileNetV3 version {}".format(version))
    final_use_se = False
    classifier_mid_channels = 1280
    if width_scale != 1.0:
        # Scale all per-unit widths, rounding to hardware-friendly channel counts.
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
        # The final block is widened only when scaling up, never narrowed.
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale)
    net = MobileNetV3(
        channels=channels,
        exp_channels=exp_channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        use_relu=use_relu,
        use_se=use_se,
        first_stride=first_stride,
        final_use_se=final_use_se,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build once with a dummy batch of 1 so that variables exist before load_weights.
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def mobilenetv3_small_w7d20(**kwargs):
    """
    Build the MobileNetV3 Small 224/0.35 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="small",
        width_scale=0.35,
        model_name="mobilenetv3_small_w7d20",
        **kwargs)
    return net
def mobilenetv3_small_wd2(**kwargs):
    """
    Build the MobileNetV3 Small 224/0.5 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="small",
        width_scale=0.5,
        model_name="mobilenetv3_small_wd2",
        **kwargs)
    return net
def mobilenetv3_small_w3d4(**kwargs):
    """
    Build the MobileNetV3 Small 224/0.75 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="small",
        width_scale=0.75,
        model_name="mobilenetv3_small_w3d4",
        **kwargs)
    return net
def mobilenetv3_small_w1(**kwargs):
    """
    Build the MobileNetV3 Small 224/1.0 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="small",
        width_scale=1.0,
        model_name="mobilenetv3_small_w1",
        **kwargs)
    return net
def mobilenetv3_small_w5d4(**kwargs):
    """
    Build the MobileNetV3 Small 224/1.25 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="small",
        width_scale=1.25,
        model_name="mobilenetv3_small_w5d4",
        **kwargs)
    return net
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Fix: this wrapper previously passed model_name="mobilenetv3_small_w7d20", which would
    # fetch the *small* variant's pretrained weights for the large architecture (and the
    # docstring wrongly said "Small"). It must use its own model name.
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def mobilenetv3_large_wd2(**kwargs):
    """
    Build the MobileNetV3 Large 224/0.5 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="large",
        width_scale=0.5,
        model_name="mobilenetv3_large_wd2",
        **kwargs)
    return net
def mobilenetv3_large_w3d4(**kwargs):
    """
    Build the MobileNetV3 Large 224/0.75 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="large",
        width_scale=0.75,
        model_name="mobilenetv3_large_w3d4",
        **kwargs)
    return net
def mobilenetv3_large_w1(**kwargs):
    """
    Build the MobileNetV3 Large 224/1.0 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="large",
        width_scale=1.0,
        model_name="mobilenetv3_large_w1",
        **kwargs)
    return net
def mobilenetv3_large_w5d4(**kwargs):
    """
    Build the MobileNetV3 Large 224/1.25 network from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv3(
        version="large",
        width_scale=1.25,
        model_name="mobilenetv3_large_w5d4",
        **kwargs)
    return net
def _test():
    """Smoke-test all MobileNetV3 constructors: output shape and trainable-weight count."""
    import numpy as np
    import tensorflow.keras.backend as K
    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts per constructor (insertion order preserved).
    expected_weight_counts = {
        mobilenetv3_small_w7d20: 2159600,
        mobilenetv3_small_wd2: 2288976,
        mobilenetv3_small_w3d4: 2581312,
        mobilenetv3_small_w1: 2945288,
        mobilenetv3_small_w5d4: 3643632,
        mobilenetv3_large_w7d20: 2943080,
        mobilenetv3_large_wd2: 3334896,
        mobilenetv3_large_w3d4: 4263496,
        mobilenetv3_large_w1: 5481752,
        mobilenetv3_large_w5d4: 7459144,
    }
    for model in expected_weight_counts:
        net = model(pretrained=pretrained, data_format=data_format)
        batch = 14
        if is_channels_first(data_format):
            input_shape = (batch, 3, 224, 224)
        else:
            input_shape = (batch, 224, 224, 3)
        y = net(tf.random.normal(input_shape))
        assert tuple(y.shape.as_list()) == (batch, 1000)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_weight_counts[model]
if __name__ == "__main__":
    _test()
| 20,951 | 34.572156 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/lffd.py | """
LFFD for face detection, implemented in TensorFlow.
Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
"""
__all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3, conv1x1_block, conv3x3_block, Concurrent, MultiOutputSequential, ParallelConcurent,\
is_channels_first
from .resnet import ResUnit
from .preresnet import PreResUnit
class LffdDetectionBranch(nn.Layer):
    """
    LFFD specific detection branch: two stacked pointwise (1x1) convolutions,
    where only the first keeps its activation and the second emits the raw
    branch output.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super(LffdDetectionBranch, self).__init__(**kwargs)
        # Options shared by both convolutions.
        shared_opts = dict(
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format)
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=in_channels,
            name="conv1",
            **shared_opts)
        # No activation on the output convolution: raw logits/regression values.
        self.conv2 = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            name="conv2",
            **shared_opts)

    def call(self, x, training=None):
        y = self.conv1(x, training=training)
        return self.conv2(y, training=training)
class LffdDetectionBlock(nn.Layer):
    """
    LFFD specific detection block: a shared 1x1 reduction convolution feeding
    two parallel detection branches — a 4-channel bbox branch and a 2-channel
    score branch — whose outputs are concatenated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 use_bias,
                 use_bn,
                 data_format="channels_last",
                 **kwargs):
        super(LffdDetectionBlock, self).__init__(**kwargs)
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="conv")
        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        # Branch order matters for the concatenated output layout:
        # bbox regression first (4 channels), then face score (2 channels).
        for branch_name, branch_out_channels in (("bbox_branch", 4), ("score_branch", 2)):
            self.branches.add(LffdDetectionBranch(
                in_channels=mid_channels,
                out_channels=branch_out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                data_format=data_format,
                name=branch_name))

    def call(self, x, training=None):
        y = self.conv(x, training=training)
        return self.branches(y, training=training)
class LFFD(tf.keras.Model):
    """
    LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.

    The model is a multi-output encoder (ResNet- or PreResNet-style stages that
    expose several intermediate feature maps) followed by one detection block
    per exposed feature map.

    Parameters:
    ----------
    enc_channels : list of int
        Number of output channels for each encoder stage.
    dec_channels : int
        Number of output channels for each decoder stage.
    init_block_channels : int
        Number of output channels for the initial encoder unit.
    layers : list of int
        Number of units in each encoder stage.
    int_bends : list of int
        Number of internal bends (extra intermediate side outputs) for each encoder stage.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 640)
        Spatial size of the expected input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 enc_channels,
                 dec_channels,
                 init_block_channels,
                 layers,
                 int_bends,
                 use_preresnet,
                 in_channels=3,
                 in_size=(640, 640),
                 data_format="channels_last",
                 **kwargs):
        super(LFFD, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        # Backbone unit type is selected once; both variants share the same call signature.
        unit_class = PreResUnit if use_preresnet else ResUnit
        use_bias = True
        use_bn = False
        # return_last=False: the encoder returns only the collected side outputs,
        # not the final tensor (NOTE(review): semantics defined by
        # MultiOutputSequential in .common — confirm there).
        self.encoder = MultiOutputSequential(return_last=False)
        # Stem: 3x3 stride-2 convolution without padding.
        self.encoder.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            padding=0,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            stage = MultiOutputSequential(multi_output=False, dual_output=True, name="stage{}".format(i + 1))
            # Stage transition: plain 3x3 stride-2 convolution (no BN, no activation wrapper).
            stage.add(conv3x3(
                in_channels=in_channels,
                out_channels=channels_per_stage,
                strides=2,
                padding=0,
                use_bias=use_bias,
                data_format=data_format,
                name="trans{}".format(i + 1)))
            for j in range(layers_per_stage):
                unit = unit_class(
                    in_channels=channels_per_stage,
                    out_channels=channels_per_stage,
                    strides=1,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bottleneck=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1))
                # The last `int_bends_per_stage` units of the stage are marked as
                # extra side outputs ("internal bends").
                if layers_per_stage - j <= int_bends_per_stage:
                    unit.do_output = True
                stage.add(unit)
            # Every stage also exposes its final activated feature map.
            final_activ = nn.ReLU(name="final_activ")
            final_activ.do_output = True
            stage.add(final_activ)
            stage.do_output2 = True
            in_channels = channels_per_stage
            self.encoder.add(stage)
        # Decoder: one detection block per encoder side output, applied in parallel.
        self.decoder = ParallelConcurent()
        k = 0
        # This loop mirrors the encoder loop above so that the k-th detection
        # block lines up with the k-th exported feature map (same channel count).
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            for j in range(layers_per_stage):
                if layers_per_stage - j <= int_bends_per_stage:
                    self.decoder.add(LffdDetectionBlock(
                        in_channels=channels_per_stage,
                        mid_channels=dec_channels,
                        use_bias=use_bias,
                        use_bn=use_bn,
                        data_format=data_format,
                        name="unit{}".format(k + 1)))
                    k += 1
            # Detection block for the stage's final (ReLU) side output.
            self.decoder.add(LffdDetectionBlock(
                in_channels=channels_per_stage,
                mid_channels=dec_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                data_format=data_format,
                name="unit{}".format(k + 1)))
            k += 1
    def call(self, x, training=None):
        # Encoder yields a list of side-output feature maps; the decoder maps
        # each one through its own detection block.
        x = self.encoder(x, training=training)
        x = self.decoder(x, training=training)
        return x
def get_lffd(blocks,
             use_preresnet,
             model_name=None,
             pretrained=False,
             root=os.path.join("~", ".tensorflow", "models"),
             **kwargs):
    """
    Create LFFD model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Per-depth configuration: (units per stage, stage channels, internal bends).
    configs = {
        20: ([3, 1, 1, 1, 1], [64, 64, 64, 128, 128], [0, 0, 0, 0, 0]),
        25: ([4, 2, 1, 3], [64, 64, 128, 128], [1, 1, 0, 2]),
    }
    if blocks not in configs:
        raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks))
    layers, enc_channels, int_bends = configs[blocks]
    net = LFFD(
        enc_channels=enc_channels,
        dec_channels=128,
        init_block_channels=64,
        layers=layers,
        int_bends=int_bends,
        use_preresnet=use_preresnet,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a dummy batch of one so that weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def lffd20x5s320v2_widerface(**kwargs):
    """LFFD-320-20L-5S-V2 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector
    for Edge Devices,' https://arxiv.org/abs/1904.10633.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_lffd`.
    """
    return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs)


def lffd25x8s560v1_widerface(**kwargs):
    """LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector
    for Edge Devices,' https://arxiv.org/abs/1904.10633.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_lffd`.
    """
    return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs)
def _test():
    """Smoke test: build each LFFD variant and verify outputs and weight counts."""
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    in_size = (640, 640)
    pretrained = False

    # Each entry pairs a constructor with its expected number of side outputs.
    model_specs = [
        (lffd20x5s320v2_widerface, 5),
        (lffd25x8s560v1_widerface, 8),
    ]
    for constructor, expected_outs in model_specs:
        net = constructor(pretrained=pretrained)
        batch = 14
        if is_channels_first(data_format):
            shape = (batch, 3, in_size[0], in_size[1])
        else:
            shape = (batch, in_size[0], in_size[1], 3)
        y = net(tf.random.normal(shape))
        assert (len(y) == expected_outs)
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(constructor.__name__, weight_count))
        assert (constructor != lffd20x5s320v2_widerface or weight_count == 1520606)
        assert (constructor != lffd25x8s560v1_widerface or weight_count == 2290608)


if __name__ == "__main__":
    _test()
| 12,116 | 32.658333 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/sepreresnet.py | """
SE-PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b', 'SEPreResUnit']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, SEBlock, SimpleSequential, flatten
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(nn.Layer):
    """
    SE-PreResNet unit: a pre-activated residual body followed by
    Squeeze-and-Excitation recalibration, with an optional 1x1 projection on
    the identity path.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 conv1_stride,
                 data_format="channels_last",
                 **kwargs):
        super(SEPreResUnit, self).__init__(**kwargs)
        # Projection is needed whenever the shape of the identity changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        if bottleneck:
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = PreResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        self.se = SEBlock(
            channels=out_channels,
            data_format=data_format,
            name="se")
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="identity_conv")

    def call(self, x, training=None):
        residual = x
        # The body also returns its pre-activation tensor, which feeds the
        # identity projection (standard pre-resnet wiring).
        body_out, pre_activ = self.body(x, training=training)
        body_out = self.se(body_out)
        if self.resize_identity:
            residual = self.identity_conv(pre_activ)
        return body_out + residual
class SEPreResNet(tf.keras.Model):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # Feature extractor: stem, residual stages, final pre-activation, pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        prev_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(stage_idx + 1))
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add(SEPreResUnit(
                    in_channels=prev_channels,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(unit_idx + 1)))
                prev_channels = unit_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=prev_channels,
            data_format=data_format,
            name="final_block"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classification head.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=prev_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SE-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
        When None, bottleneck blocks are used for depths >= 50.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depths whose stage layout does not depend on the bottleneck flag.
    plain_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths 14, 26 and 38 have bottleneck-dependent layouts.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in plain_layers:
        layers = plain_layers[blocks]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: the layout must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a dummy batch of one so that weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def sepreresnet10(**kwargs):
    """SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs)


def sepreresnet12(**kwargs):
    """SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs)


def sepreresnet14(**kwargs):
    """SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs)


def sepreresnet16(**kwargs):
    """SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs)


def sepreresnet18(**kwargs):
    """SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)


def sepreresnet26(**kwargs):
    """SE-PreResNet-26 model (simple blocks) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs)


def sepreresnetbc26b(**kwargs):
    """SE-PreResNet-BC-26b model (bottleneck, stride on the second conv) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs)


def sepreresnet34(**kwargs):
    """SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)


def sepreresnetbc38b(**kwargs):
    """SE-PreResNet-BC-38b model (bottleneck, stride on the second conv) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs)


def sepreresnet50(**kwargs):
    """SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)


def sepreresnet50b(**kwargs):
    """SE-PreResNet-50 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)


def sepreresnet101(**kwargs):
    """SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)


def sepreresnet101b(**kwargs):
    """SE-PreResNet-101 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)


def sepreresnet152(**kwargs):
    """SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)


def sepreresnet152b(**kwargs):
    """SE-PreResNet-152 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)


def sepreresnet200(**kwargs):
    """SE-PreResNet-200 model (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)


def sepreresnet200b(**kwargs):
    """SE-PreResNet-200 model (experimental) with stride at the second convolution in
    bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Keyword arguments (``pretrained`` : bool, default False; ``root`` : str,
    default '~/.tensorflow/models') are forwarded to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _test():
    """Smoke test: build each SE-PreResNet variant and verify output shape and weight count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False
    # Constructor -> expected trainable-parameter count (insertion order preserved).
    expected_weights = {
        sepreresnet10: 5461668,
        sepreresnet12: 5536232,
        sepreresnet14: 5833840,
        sepreresnet16: 7022976,
        sepreresnet18: 11776928,
        sepreresnet26: 18092188,
        sepreresnetbc26b: 17388424,
        sepreresnet34: 21957204,
        sepreresnetbc38b: 24019064,
        sepreresnet50: 28080472,
        sepreresnet50b: 28080472,
        sepreresnet101: 49319320,
        sepreresnet101b: 49319320,
        sepreresnet152: 66814296,
        sepreresnet152b: 66814296,
        sepreresnet200: 71828312,
        sepreresnet200b: 71828312,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        batch = 14
        y = net(tf.random.normal((batch, 224, 224, 3)))
        assert (tuple(y.shape.as_list()) == (batch, 1000))
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)


if __name__ == "__main__":
    _test()
| 19,413 | 33.361062 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resnext.py | """
ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'ResNeXtBottleneck', 'ResNeXtUnit']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SimpleSequential, flatten
from .resnet import ResInitBlock
class ResNeXtBottleneck(nn.Layer):
    """
    ResNeXt bottleneck block for residual path in ResNeXt unit:
    1x1 reduce -> grouped 3x3 -> 1x1 expand (no activation on the last conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeXtBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor
        # Per-group channel count scales with the requested bottleneck width
        # (64 is the reference width from the paper).
        group_dim = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * group_dim
        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=group_width,
            data_format=data_format,
            name="conv1")
        self.conv2 = conv3x3_block(
            in_channels=group_width,
            out_channels=group_width,
            strides=strides,
            groups=cardinality,
            data_format=data_format,
            name="conv2")
        # No activation: the residual sum is activated by the enclosing unit.
        self.conv3 = conv1x1_block(
            in_channels=group_width,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")

    def call(self, x, training=None):
        y = self.conv1(x, training=training)
        y = self.conv2(y, training=training)
        return self.conv3(y, training=training)
class ResNeXtUnit(nn.Layer):
    """
    ResNeXt unit with residual connection: bottleneck body plus identity
    shortcut (projected by a 1x1 conv when shape changes), followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeXtUnit, self).__init__(**kwargs)
        # Projection is needed whenever the shape of the identity changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            data_format=data_format,
            name="body")
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                activation=None,
                data_format=data_format,
                name="identity_conv")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        if self.resize_identity:
            residual = self.identity_conv(x, training=training)
        else:
            residual = x
        y = self.body(x, training=training)
        return self.activ(y + residual)
class ResNeXt(tf.keras.Model):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural
    Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        # Feature extractor: stem, residual stages, global pooling.
        self.features = SimpleSequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        prev_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(stage_idx + 1))
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add(ResNeXtUnit(
                    in_channels=prev_channels,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    cardinality=cardinality,
                    bottleneck_width=bottleneck_width,
                    data_format=data_format,
                    name="unit{}".format(unit_idx + 1)))
                prev_channels = unit_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        # Classification head.
        self.output1 = nn.Dense(
            units=classes,
            input_dim=prev_channels,
            name="output1")

    def call(self, x, training=None):
        x = self.features(x, training=training)
        x = flatten(x, self.data_format)
        return self.output1(x)
def get_resnext(blocks,
                cardinality,
                bottleneck_width,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Depth -> units per stage.
    depth_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_layers:
        raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
    layers = depth_layers[blocks]
    # Sanity check: the layout must reproduce the requested depth.
    assert (sum(layers) * 3 + 2 == blocks)
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = ResNeXt(
        channels=channels,
        init_block_channels=64,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        # Build with a dummy batch of one so that weights exist before loading.
        if net.data_format == "channels_first":
            input_shape = (1,) + (in_channels,) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))
    return net
def resnext14_16x4d(**kwargs):
    """
    Build the ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=14,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext14_16x4d",
        **kwargs)
def resnext14_32x2d(**kwargs):
    """
    Build the ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=14,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext14_32x2d",
        **kwargs)
def resnext14_32x4d(**kwargs):
    """
    Build the ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=14,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext14_32x4d",
        **kwargs)
def resnext26_16x4d(**kwargs):
    """
    Build the ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=26,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext26_16x4d",
        **kwargs)
def resnext26_32x2d(**kwargs):
    """
    Build the ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=26,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext26_32x2d",
        **kwargs)
def resnext26_32x4d(**kwargs):
    """
    Build the ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=26,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext26_32x4d",
        **kwargs)
def resnext38_32x4d(**kwargs):
    """
    Build the ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=38,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext38_32x4d",
        **kwargs)
def resnext50_32x4d(**kwargs):
    """
    Build the ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext50_32x4d",
        **kwargs)
def resnext101_32x4d(**kwargs):
    """
    Build the ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext101_32x4d",
        **kwargs)
def resnext101_64x4d(**kwargs):
    """
    Build the ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="resnext101_64x4d",
        **kwargs)
def _test():
    """Smoke-test every ResNeXt variant: forward pass shape and trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_weight_counts = {
        resnext14_16x4d: 7127336,
        resnext14_32x2d: 7029416,
        resnext14_32x4d: 9411880,
        resnext26_16x4d: 10119976,
        resnext26_32x2d: 9924136,
        resnext26_32x4d: 15389480,
        resnext38_32x4d: 21367080,
        resnext50_32x4d: 25028904,
        resnext101_32x4d: 44177704,
        resnext101_64x4d: 83455272,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        batch = 14
        x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
# Run the smoke test over all model variants when executed as a script.
if __name__ == "__main__":
    _test()
| 16,041 | 32.560669 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/jasper.py | """
Jasper/DR for ASR, implemented in TensorFlow.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader',
'NemoMelSpecExtractor', 'CtcDecoder']
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine.input_spec import InputSpec
from .common import get_activation_layer, Conv1d, BatchNorm, DualPathSequential, DualPathParallelConcurent,\
is_channels_first
class NemoAudioReader(object):
    """
    Audio Reader from NVIDIA NEMO toolkit.

    Reads audio files from disk, resampling and down-mixing to mono so every
    returned waveform has the desired sample rate.

    Parameters:
    ----------
    desired_audio_sample_rate : int, default 16000
        Desired audio sample rate.
    """
    # NOTE: the original docstring also documented a `trunc_value` parameter,
    # but the constructor never accepted one; the stale entry has been removed.
    def __init__(self, desired_audio_sample_rate=16000):
        super(NemoAudioReader, self).__init__()
        self.desired_audio_sample_rate = desired_audio_sample_rate

    def read_from_file(self, audio_file_path):
        """
        Read audio from file.

        Parameters:
        ----------
        audio_file_path : str
            Path to audio file.

        Returns:
        -------
        np.array
            Audio data (mono, at `desired_audio_sample_rate`).
        """
        from soundfile import SoundFile

        with SoundFile(audio_file_path, "r") as data:
            sample_rate = data.samplerate
            audio_data = data.read(dtype="float32")

        # soundfile returns (frames, channels); work channel-major below.
        audio_data = audio_data.transpose()

        if sample_rate != self.desired_audio_sample_rate:
            from librosa.core import resample as lr_resample
            audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate)
        if audio_data.ndim >= 2:
            # Down-mix multi-channel audio to mono by averaging channels.
            audio_data = np.mean(audio_data, axis=1)

        return audio_data

    def read_from_files(self, audio_file_paths):
        """
        Read audios from files.

        Parameters:
        ----------
        audio_file_paths : list of str
            Paths to audio files.

        Returns:
        -------
        list of np.array
            Audio data for each file, in input order.
        """
        assert (type(audio_file_paths) in (list, tuple))
        return [self.read_from_file(audio_file_path) for audio_file_path in audio_file_paths]
class NemoMelSpecExtractor(nn.Layer):
    """
    Mel-Spectrogram Extractor from NVIDIA NEMO toolkit.

    Parameters:
    ----------
    sample_rate : int, default 16000
        Sample rate of the input audio data.
    window_size_sec : float, default 0.02
        Size of window for FFT in seconds.
    window_stride_sec : float, default 0.01
        Stride of window for FFT in seconds.
    n_fft : int, default 512
        Length of FT window.
    n_filters : int, default 64
        Number of Mel spectrogram freq bins.
    preemph : float, default 0.97
        Amount of pre emphasis to add to audio.
    dither : float, default 1.0e-05
        Amount of white-noise dithering.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 sample_rate=16000,
                 window_size_sec=0.02,
                 window_stride_sec=0.01,
                 n_fft=512,
                 n_filters=64,
                 preemph=0.97,
                 dither=1.0e-05,
                 data_format="channels_last",
                 **kwargs):
        super(NemoMelSpecExtractor, self).__init__(**kwargs)
        self.data_format = data_format
        # Guard value added before log() so silent frames don't produce -inf.
        self.log_zero_guard_value = 2 ** -24
        win_length = int(window_size_sec * sample_rate)
        self.hop_length = int(window_stride_sec * sample_rate)
        self.n_filters = n_filters

        # `scipy.signal.hann` was removed in SciPy 1.13; the window functions
        # live in the `scipy.signal.windows` submodule.
        from scipy.signal import windows as scipy_windows
        from librosa import stft as librosa_stft
        window_arr = scipy_windows.hann(win_length, sym=True)
        self.stft = lambda x: librosa_stft(
            x,
            n_fft=n_fft,
            hop_length=self.hop_length,
            win_length=win_length,
            window=window_arr,
            center=True)
        self.window_arr_shape = window_arr.shape

        self.dither = dither
        self.preemph = preemph

        # Output time dimension is padded up to a multiple of this value.
        self.pad_align = 16

        from librosa.filters import mel as librosa_mel
        # librosa >= 0.10 made these arguments keyword-only; pass them by name.
        self.fb_arr = librosa_mel(
            sr=sample_rate,
            n_fft=n_fft,
            n_mels=n_filters,
            fmin=0,
            fmax=(sample_rate / 2))

    def build(self, input_shape):
        # Non-trainable weights exist only so the window/filterbank constants
        # appear in the checkpoint; the math in `call` uses the numpy arrays.
        self.window = self.add_weight(
            shape=self.window_arr_shape,
            name="window",
            initializer=initializers.get("zeros"),
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=False)
        self.fb = self.add_weight(
            shape=np.expand_dims(self.fb_arr, axis=0).shape,
            name="fb",
            initializer=initializers.get("zeros"),
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=False)
        channel_axis = (1 if is_channels_first(self.data_format) else len(input_shape) - 1)
        axes = {}
        for i in range(1, len(input_shape)):
            if i != channel_axis:
                axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True

    def call(self, x, training=None):
        """
        Compute per-utterance normalized log-Mel spectrograms.

        Runs eagerly on numpy (requires eager tensors as input). Returns a
        padded batch tensor and per-item valid lengths.
        """
        xs = x.numpy()
        x_eps = 1e-5

        batch = len(xs)
        # `np.long` was removed in NumPy 1.24; use the explicit 64-bit type.
        y_len = np.zeros((batch,), dtype=np.int64)
        ys = []
        for i, xi in enumerate(xs):
            y_len[i] = np.ceil(float(len(xi)) / self.hop_length).astype(np.int64)

            if self.dither > 0:
                xi += self.dither * np.random.randn(*xi.shape)

            # Pre-emphasis filter: x[t] - preemph * x[t-1].
            xi = np.concatenate((xi[:1], xi[1:] - self.preemph * xi[:-1]), axis=0)

            yi = self.stft(xi)
            yi = np.abs(yi)
            yi = np.square(yi)
            yi = np.matmul(self.fb_arr, yi)
            yi = np.log(yi + self.log_zero_guard_value)

            assert (yi.shape[1] != 1)
            # Per-feature normalization over time (mean 0, std 1).
            yi_mean = yi.mean(axis=1)
            yi_std = yi.std(axis=1)
            yi_std += x_eps
            yi = (yi - np.expand_dims(yi_mean, axis=-1)) / np.expand_dims(yi_std, axis=-1)
            ys.append(yi)

        channels = ys[0].shape[0]
        x_len_max = max([yj.shape[-1] for yj in ys])
        y = np.zeros((batch, channels, x_len_max), dtype=np.float32)
        for i, yi in enumerate(ys):
            x_len_i = y_len[i]
            y[i, :, :x_len_i] = yi[:, :x_len_i]

        # Pad time dimension up to a multiple of `pad_align`.
        pad_rem = x_len_max % self.pad_align
        if pad_rem != 0:
            y = np.pad(y, ((0, 0), (0, 0), (0, self.pad_align - pad_rem)))
        if not is_channels_first(self.data_format):
            y = y.swapaxes(1, 2)
        x = tf.convert_to_tensor(y)
        x_len = tf.convert_to_tensor(y_len)

        return x, x_len

    def calc_flops(self, x):
        # Rough accounting: one flop per input sample, no MACs.
        assert (x.shape[0] == 1)
        num_flops = x[0].size
        num_macs = 0
        return num_flops, num_macs
class CtcDecoder(object):
    """
    Greedy CTC decoder: collapses repeated labels and drops blanks to turn a
    label sequence into text.

    Parameters:
    ----------
    vocabulary : list of str
        Vocabulary of the dataset.
    """
    def __init__(self,
                 vocabulary):
        super().__init__()
        # By convention the blank symbol gets the index just past the vocabulary.
        self.blank_id = len(vocabulary)
        self.labels_map = {i: token for i, token in enumerate(vocabulary)}

    def __call__(self,
                 predictions):
        """
        Decode a sequence of labels to words.

        Parameters:
        ----------
        predictions : np.array of int or list of list of int
            Tensor with predicted labels.

        Returns:
        -------
        list of str
            Words.
        """
        hypotheses = []
        for prediction in predictions:
            decoded = []
            prev = self.blank_id
            for label in prediction:
                # Keep a label only if it is not blank and either differs from
                # the previous label or follows a blank (CTC collapse rule).
                if label != self.blank_id and (label != prev or prev == self.blank_id):
                    decoded.append(label)
                prev = label
            hypotheses.append("".join(self.labels_map[c] for c in decoded))
        return hypotheses
def conv1d1(in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            **kwargs):
    """
    Pointwise (kernel size 1) variant of the 1D convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return Conv1d(
        kernel_size=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        **kwargs)
class MaskConv1d(Conv1d):
    """
    Masked 1D convolution block.

    Zeroes out time steps beyond each sequence's true length before convolving,
    so padding cannot leak into the output, and updates the per-item valid
    lengths to match the convolution's output length.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 1 int
        Convolution window size.
    strides : int or tuple/list of 1 int
        Strides of the convolution.
    padding : int or tuple/list of 1 int, default 0
        Padding value for convolution layer.
    dilation : int or tuple/list of 1 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_mask : bool, default True
        Whether to use mask.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_mask=True,
                 data_format="channels_last",
                 **kwargs):
        super(MaskConv1d, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            **kwargs)
        self.use_mask = use_mask
        self.data_format = data_format
        if self.use_mask:
            # Normalize possibly-sequence hyperparameters to plain ints so the
            # output-length arithmetic in `call` works on scalars.
            self.kernel_size = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
            self.strides = strides[0] if isinstance(strides, (list, tuple)) else strides
            self.padding = padding[0] if isinstance(padding, (list, tuple)) else padding
            self.dilation = dilation[0] if isinstance(dilation, (list, tuple)) else dilation

    def call(self, x, x_len):
        # x_len: per-item valid sequence lengths (int tensor of shape (batch,)).
        if self.use_mask:
            if is_channels_first(self.data_format):
                # Build a (batch, time) boolean mask of valid positions, then
                # broadcast it over the channel axis and zero the padding.
                max_len = x.shape[2]
                mask = tf.expand_dims(tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64), 0) <\
                    tf.expand_dims(x_len, -1)
                mask = tf.broadcast_to(tf.expand_dims(mask, 1), x.shape)
                x = tf.where(mask, x, tf.zeros(x.shape))
            else:
                # channels_last: time is axis 1; broadcast the mask over channels.
                max_len = x.shape[1]
                mask = tf.expand_dims(tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64), 0) <\
                    tf.expand_dims(x_len, -1)
                mask = tf.broadcast_to(tf.expand_dims(mask, -1), x.shape)
                x = tf.where(mask, x, tf.zeros(x.shape))
            # Standard convolution output-length formula (only updated when
            # masking is enabled, matching the original behavior).
            x_len = (x_len + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) // self.strides + 1
        x = super(MaskConv1d, self).call(x)
        return x, x_len
def mask_conv1d1(in_channels,
                 out_channels,
                 strides=1,
                 groups=1,
                 use_bias=False,
                 data_format="channels_last",
                 **kwargs):
    """
    Pointwise (kernel size 1) variant of the masked 1D convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return MaskConv1d(
        kernel_size=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        **kwargs)
class MaskConvBlock1d(nn.Layer):
    """
    Masked 1D convolution followed by optional batch normalization, activation,
    and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 dropout_rate=0.0,
                 data_format="channels_last",
                 **kwargs):
        super(MaskConvBlock1d, self).__init__(**kwargs)
        # Cache which optional stages are enabled.
        self.activate = activation is not None
        self.use_bn = use_bn
        self.use_dropout = dropout_rate != 0.0

        self.conv = MaskConv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="conv")
        if self.use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")

    def call(self, x, x_len, training=None):
        # conv -> (bn) -> (activation) -> (dropout); lengths flow through conv.
        x, x_len = self.conv(x, x_len)
        if self.use_bn:
            x = self.bn(x, training=training)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        return x, x_len
def mask_conv1d1_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=0,
                       data_format="channels_last",
                       **kwargs):
    """
    Pointwise (kernel size 1) variant of the masked 1D convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    padding : int, default 0
        Padding value for convolution layer.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    return MaskConvBlock1d(
        kernel_size=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
class ChannelShuffle1d(nn.Layer):
    """
    1D version of the channel shuffle layer: interleaves channels across groups
    by a reshape / transpose / reshape round-trip.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 groups,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelShuffle1d, self).__init__(**kwargs)
        assert (channels % groups == 0)
        self.groups = groups
        self.data_format = data_format

    def call(self, x, training=None):
        shape = x.get_shape().as_list()
        if is_channels_first(self.data_format):
            channels, seq_len = shape[1], shape[2]
        else:
            seq_len, channels = shape[1], shape[2]
        assert (channels % self.groups == 0)
        per_group = channels // self.groups

        # Split channels into (groups, per_group), swap those two axes, merge back.
        if is_channels_first(self.data_format):
            x = tf.reshape(x, shape=(-1, self.groups, per_group, seq_len))
            x = tf.transpose(x, perm=(0, 2, 1, 3))
            return tf.reshape(x, shape=(-1, channels, seq_len))
        x = tf.reshape(x, shape=(-1, seq_len, self.groups, per_group))
        x = tf.transpose(x, perm=(0, 1, 3, 2))
        return tf.reshape(x, shape=(-1, seq_len, channels))

    def __repr__(self):
        s = "{name}(groups={groups})"
        return s.format(
            name=self.__class__.__name__,
            groups=self.groups)
class DwsConvBlock1d(nn.Layer):
    """
    Depthwise-separable 1D convolution block: masked depthwise conv, masked
    pointwise conv, optional channel shuffle, batch normalization, activation,
    and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation="relu",
                 dropout_rate=0.0,
                 data_format="channels_last",
                 **kwargs):
        super(DwsConvBlock1d, self).__init__(**kwargs)
        # Cache which optional stages are enabled.
        self.activate = activation is not None
        self.use_bn = use_bn
        self.use_dropout = dropout_rate != 0.0
        # Channel shuffle only matters for a grouped pointwise convolution.
        self.use_channel_shuffle = groups > 1

        # Depthwise stage: one filter per input channel.
        self.dw_conv = MaskConv1d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            use_bias=use_bias,
            data_format=data_format,
            name="dw_conv")
        # Pointwise stage mixes channels (optionally in groups).
        self.pw_conv = mask_conv1d1(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=groups,
            use_bias=use_bias,
            data_format=data_format,
            name="pw_conv")
        if self.use_channel_shuffle:
            self.shuffle = ChannelShuffle1d(
                channels=out_channels,
                groups=groups,
                data_format=data_format,
                name="shuffle")
        if self.use_bn:
            self.bn = BatchNorm(
                epsilon=bn_eps,
                data_format=data_format,
                name="bn")
        if self.activate:
            self.activ = get_activation_layer(activation, name="activ")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")

    def call(self, x, x_len, training=None):
        # dw conv -> pw conv -> (shuffle) -> (bn) -> (activation) -> (dropout).
        x, x_len = self.dw_conv(x, x_len)
        x, x_len = self.pw_conv(x, x_len)
        if self.use_channel_shuffle:
            x = self.shuffle(x)
        if self.use_bn:
            x = self.bn(x, training=training)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x, training=training)
        return x, x_len
class JasperUnit(nn.Layer):
    """
    Jasper unit with residual connection.

    In dense-residual (DR) mode the unit receives the outputs of all previous
    units (threaded through the `x_len` argument as a tuple) and sums a 1x1
    projection of each into its own output.

    Parameters:
    ----------
    in_channels : int or list of int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bn_eps,
                 dropout_rate,
                 repeat,
                 use_dw,
                 use_dr,
                 data_format="channels_last",
                 **kwargs):
        super(JasperUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        self.use_dr = use_dr
        block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d

        if self.use_dr:
            # Dense residual: one 1x1 projection per previous unit output;
            # `in_channels` is a list here, one entry per incoming skip path.
            self.identity_block = DualPathParallelConcurent(name="identity_block")
            for i, dense_in_channels_i in enumerate(in_channels):
                self.identity_block.add(mask_conv1d1_block(
                    in_channels=dense_in_channels_i,
                    out_channels=out_channels,
                    bn_eps=bn_eps,
                    dropout_rate=0.0,
                    activation=None,
                    data_format=data_format,
                    name="block{}".format(i + 1)))
            # The body consumes only the most recent input.
            in_channels = in_channels[-1]
        else:
            # Plain residual: a single 1x1 projection of the unit input.
            self.identity_block = mask_conv1d1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_eps=bn_eps,
                dropout_rate=0.0,
                activation=None,
                data_format=data_format,
                name="identity_block")

        self.body = DualPathSequential(name="body")
        for i in range(repeat):
            # The last body block has no activation/dropout; those are applied
            # after the residual sum below.
            activation = "relu" if i < repeat - 1 else None
            dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0
            self.body.add(block_class(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=1,
                padding=(kernel_size // 2),
                bn_eps=bn_eps,
                dropout_rate=dropout_rate_i,
                activation=activation,
                data_format=data_format,
                name="block{}".format(i + 1)))
            in_channels = out_channels

        self.activ = nn.ReLU()
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout")

    def call(self, x, x_len, training=None):
        if self.use_dr:
            # In DR mode `x_len` carries a tuple (true_len, prev_outputs,
            # prev_lens) so the running skip-connection state can be threaded
            # through DualPathSequential, which only passes two values.
            x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None)
            y = [x] if y is None else y + [x]
            y_len = [x_len] if y_len is None else y_len + [x_len]
            # Project every previous output to `out_channels` and sum them.
            identity, _ = self.identity_block(y, y_len, training=training)
            identity = tf.stack(identity, axis=1)
            identity = tf.math.reduce_sum(identity, axis=1)
        else:
            identity, _ = self.identity_block(x, x_len, training=training)

        x, x_len = self.body(x, x_len, training=training)
        x = x + identity
        x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x, training=training)

        if self.use_dr:
            # Re-pack the skip-connection state for the next unit.
            return x, (x_len, y, y_len)
        else:
            return x, x_len
class JasperFinalBlock(nn.Layer):
    """
    Jasper-specific final block: a dilated (optionally depthwise) convolution
    followed by a plain masked convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels : list of int
        Number of output channels for each block.
    kernel_sizes : list of int
        Kernel sizes for each block.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rates : list of int
        Dropout rates for each block.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 kernel_sizes,
                 bn_eps,
                 dropout_rates,
                 use_dw,
                 use_dr,
                 data_format="channels_last",
                 **kwargs):
        super(JasperFinalBlock, self).__init__(**kwargs)
        self.use_dr = use_dr
        conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        # First conv uses dilation 2 with matching "same"-style padding.
        self.conv1 = conv1_class(
            in_channels=in_channels,
            out_channels=channels[-2],
            kernel_size=kernel_sizes[-2],
            strides=1,
            padding=(2 * kernel_sizes[-2] // 2 - 1),
            dilation=2,
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[-2],
            data_format=data_format,
            name="conv1")
        self.conv2 = MaskConvBlock1d(
            in_channels=channels[-2],
            out_channels=channels[-1],
            kernel_size=kernel_sizes[-1],
            strides=1,
            padding=(kernel_sizes[-1] // 2),
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[-1],
            data_format=data_format,
            name="conv2")

    def call(self, x, x_len, training=None):
        if self.use_dr:
            # Dense-residual mode packs extra state into x_len; the true
            # lengths tensor is the first element.
            x_len = x_len[0]
        x, x_len = self.conv1(x, x_len, training=training)
        return self.conv2(x, x_len, training=training)
class Jasper(tf.keras.Model):
    """
    Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit and initial/final block.
    kernel_sizes : list of int
        Kernel sizes for each unit and initial/final block.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rates : list of int
        Dropout rates for each unit and initial/final block.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    from_audio : bool, default True
        Whether to treat input as audio instead of Mel-specs.
    dither : float, default 0.0
        Amount of white-noise dithering.
    return_text : bool, default False
        Whether to return text instead of logits.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    in_channels : int, default 64
        Number of input channels (audio features).
    classes : int, default 29
        Number of classification classes (number of graphemes).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 bn_eps,
                 dropout_rates,
                 repeat,
                 use_dw,
                 use_dr,
                 from_audio=True,
                 dither=0.0,
                 return_text=False,
                 vocabulary=None,
                 in_channels=64,
                 classes=29,
                 data_format="channels_last",
                 **kwargs):
        super(Jasper, self).__init__(**kwargs)
        self.in_size = in_channels
        self.in_channels = in_channels
        self.classes = classes
        self.vocabulary = vocabulary
        self.data_format = data_format
        self.from_audio = from_audio
        self.return_text = return_text

        if self.from_audio:
            # Converts raw waveforms into normalized log-Mel spectrograms.
            self.preprocessor = NemoMelSpecExtractor(
                dither=dither,
                data_format=data_format,
                name="preprocessor")

        self.features = DualPathSequential(name="features")
        init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        self.features.add(init_block_class(
            in_channels=in_channels,
            out_channels=channels[0],
            kernel_size=kernel_sizes[0],
            strides=2,
            padding=(kernel_sizes[0] // 2),
            bn_eps=bn_eps,
            dropout_rate=dropout_rates[0],
            data_format=data_format,
            name="init_block"))
        in_channels = channels[0]
        # For dense-residual mode, each unit needs the channel counts of all
        # previous unit outputs.
        in_channels_list = []
        for i, (out_channels, kernel_size, dropout_rate) in \
                enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])):
            in_channels_list += [in_channels]
            self.features.add(JasperUnit(
                in_channels=(in_channels_list if use_dr else in_channels),
                out_channels=out_channels,
                kernel_size=kernel_size,
                bn_eps=bn_eps,
                dropout_rate=dropout_rate,
                repeat=repeat,
                use_dw=use_dw,
                use_dr=use_dr,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        self.features.add(JasperFinalBlock(
            in_channels=in_channels,
            channels=channels,
            kernel_sizes=kernel_sizes,
            bn_eps=bn_eps,
            dropout_rates=dropout_rates,
            use_dw=use_dw,
            use_dr=use_dr,
            data_format=data_format,
            name="final_block"))
        in_channels = channels[-1]

        # Pointwise conv head producing per-frame grapheme logits.
        self.output1 = conv1d1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=True,
            data_format=data_format,
            name="output1")

        if self.return_text:
            self.ctc_decoder = CtcDecoder(vocabulary=vocabulary)

    def call(self, x, x_len=None, training=None):
        """
        Forward pass.

        Returns (logits, lengths), or decoded strings when `return_text` is set.
        """
        if x_len is None:
            # Allow a single (x, x_len) pair packed into `x`.
            assert (type(x) in (list, tuple))
            x, x_len = x

        if self.from_audio:
            x, x_len = self.preprocessor(x, training=training)

        x, x_len = self.features(x, x_len, training=training)
        x = self.output1(x)

        if self.return_text:
            # BUGFIX: the original line used MXNet/PyTorch tensor methods
            # (`swapaxes`, `log_softmax(dim=...)`, `argmax(..., keepdim=...)`,
            # `asnumpy`) that do not exist on TF tensors and raised
            # AttributeError; use the TF equivalents, transposing to
            # (batch, time, classes) only when channels come first.
            if is_channels_first(self.data_format):
                x = tf.transpose(x, perm=(0, 2, 1))
            log_probs = tf.nn.log_softmax(x, axis=-1)
            greedy_predictions = tf.math.argmax(log_probs, axis=-1).numpy()
            return self.ctc_decoder(greedy_predictions)
        else:
            return x, x_len
def get_jasper(version,
               use_dw=False,
               use_dr=False,
               bn_eps=1e-3,
               vocabulary=None,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create Jasper/DR/QuartzNet model with specific parameters.

    Parameters:
    ----------
    version : tuple of str
        Model type and configuration, e.g. ("jasper", "10x5").
    use_dw : bool, default False
        Whether to use depthwise block.
    use_dr : bool, default False
        Whether to use dense residual scheme.
    bn_eps : float, default 1e-3
        Small float added to variance in Batch norm.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # "BxR" -> B total blocks spread over 5 main stages, R convs per block.
    blocks, repeat = tuple(map(int, version[1].split("x")))
    main_stage_repeat = blocks // 5

    model_type = version[0]
    if model_type == "jasper":
        channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
        kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
        dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
    elif model_type == "quartznet":
        channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024]
        kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1]
        dropout_rates_per_stage = [0.0] * 8
    else:
        raise ValueError("Unsupported Jasper family model type: {}".format(model_type))

    # Stages 1..5 repeat `main_stage_repeat` times; stem and the two final
    # stages appear once each.
    stage_repeat = np.full((8,), 1)
    stage_repeat[1:-2] *= main_stage_repeat
    channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
    kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
    dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])

    net = Jasper(
        channels=channels,
        kernel_sizes=kernel_sizes,
        bn_eps=bn_eps,
        dropout_rates=dropout_rates,
        repeat=repeat,
        use_dw=use_dw,
        use_dr=use_dr,
        vocabulary=vocabulary,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Build the model by running one dummy batch so variables exist
        # before the checkpoint is restored.
        seq_len = 100
        x_shape = (1, seq_len * 640) if net.from_audio else (
            (1, net.in_size, seq_len) if is_channels_first(net.data_format) else (1, seq_len, net.in_size))
        x = tf.random.normal(x_shape)
        # `np.long` was removed in NumPy 1.24; use the explicit 64-bit type.
        x_len = tf.convert_to_tensor(np.array([seq_len], np.int64))
        net(x, x_len)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def jasper5x3(**kwargs):
    """
    Jasper 5x3 acoustic model (5 blocks, 3 sub-block repeats) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_jasper(version=("jasper", "5x3"), model_name="jasper5x3", **kwargs)
    return net
def jasper10x4(**kwargs):
    """
    Jasper 10x4 acoustic model (10 blocks, 4 sub-block repeats) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs)
    return net
def jasper10x5(**kwargs):
    """
    Jasper 10x5 acoustic model (10 blocks, 5 sub-block repeats) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_jasper(version=("jasper", "10x5"), model_name="jasper10x5", **kwargs)
    return net
def _test():
    """
    Smoke test: build each Jasper model, run a forward pass on random input,
    and check the output shape and trainable parameter count.
    """
    # Local imports mirror the style of the sibling test in this repo;
    # `np` was previously used here without a visible import.
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False
    from_audio = True
    # from_audio = False
    audio_features = 64
    classes = 29

    models = [
        jasper5x3,
        jasper10x4,
        jasper10x5,
    ]

    for model in models:
        net = model(
            in_channels=audio_features,
            classes=classes,
            from_audio=from_audio,
            pretrained=pretrained,
            data_format=data_format)

        batch = 3
        # Raw-audio inputs use ~640 samples per feature frame.
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (
            (batch, audio_features, seq_len_max) if is_channels_first(data_format) else
            (batch, seq_len_max, audio_features))
        x = tf.random.normal(shape=x_shape)
        # `np.long` was deprecated in NumPy 1.20 and removed in 1.24;
        # use an explicit 64-bit integer dtype instead.
        x_len = tf.convert_to_tensor(seq_len.astype(np.int64))

        y, y_len = net(x, x_len)
        assert (y.shape.as_list()[0] == batch)
        classes_id = 1 if is_channels_first(data_format) else 2
        seq_id = 2 if is_channels_first(data_format) else 1
        assert (y.shape.as_list()[classes_id] == net.classes)
        if from_audio:
            assert (y.shape.as_list()[seq_id] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.shape.as_list()[seq_id] in [seq_len_max // 2, seq_len_max // 2 + 1])

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != jasper5x3 or weight_count == 107681053)
        assert (model != jasper10x4 or weight_count == 261393693)
        assert (model != jasper10x5 or weight_count == 322286877)


if __name__ == "__main__":
    _test()
| 39,745 | 32.176962 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow2/tf2cv/models/resneta.py | """
ResNet(A) with average downsampling for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, AvgPool2d, SimpleSequential, is_channels_first
from .resnet import ResBlock, ResBottleneck
from .senet import SEInitBlock
class ResADownBlock(nn.Layer):
    """
    ResNet(A) downsampling block for the identity branch of a residual unit:
    average pooling followed by a 1x1 convolution without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation=1,
                 data_format="channels_last",
                 **kwargs):
        super(ResADownBlock, self).__init__(**kwargs)
        # With dilation the spatial size is preserved, so pooling degrades
        # to a no-op (size/stride of 1).
        pool_stride = strides if dilation == 1 else 1
        self.pool = AvgPool2d(
            pool_size=pool_stride,
            strides=pool_stride,
            ceil_mode=True,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        return self.conv(self.pool(x), training=training)
class ResAUnit(nn.Layer):
    """
    ResNet(A) residual unit: a body branch (simple or bottleneck block) summed
    with an identity branch, where the identity path is average-pool
    downsampled whenever the input and output shapes differ.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 bottleneck=True,
                 conv1_stride=False,
                 data_format="channels_last",
                 **kwargs):
        super(ResAUnit, self).__init__(**kwargs)
        # The identity path needs resizing whenever channel count or spatial
        # resolution change across the unit.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        if not bottleneck:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        else:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        if self.resize_identity:
            self.identity_block = ResADownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                data_format=data_format,
                name="identity_block")
        self.activ = nn.ReLU()

    def call(self, x, training=None):
        identity = self.identity_block(x, training=training) if self.resize_identity else x
        x = self.body(x, training=training)
        return self.activ(x + identity)
class ResNetA(tf.keras.Model):
    """
    ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    dilated : bool, default False
        Whether to use dilation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 dilated=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNetA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(stage_idx + 1))
            for unit_idx, out_channels in enumerate(stage_channels):
                first_unit = (unit_idx == 0)
                if dilated:
                    # In dilated mode only stage 2 downsamples; deeper stages
                    # trade stride for growing dilation instead.
                    strides = 2 if (first_unit and (stage_idx != 0) and (stage_idx < 2)) else 1
                    dilation = (2 ** max(0, stage_idx - 1 - int(first_unit)))
                else:
                    strides = 2 if first_unit and (stage_idx != 0) else 1
                    dilation = 1
                stage.add(ResAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(unit_idx + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")

    def call(self, x, training=None):
        return self.output1(self.features(x, training=training))
def get_resneta(blocks,
                bottleneck=None,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create ResNet(A) with average downsampling model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Per-stage unit counts. Depths 14, 26 and 38 differ between the
    # basic-block and bottleneck variants; the rest map directly.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    else:
        depth_to_layers = {
            10: [1, 1, 1, 1],
            12: [2, 1, 1, 1],
            16: [2, 2, 2, 1],
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
        }
        if blocks not in depth_to_layers:
            raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks))
        layers = depth_to_layers[blocks]

    # Sanity check: unit counts must reproduce the requested depth
    # (3 convs per bottleneck unit, 2 per simple unit, +2 stem/head layers).
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last stage.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNetA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def resneta10(**kwargs):
    """
    ResNet(A)-10 model (average downsampling) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=10, model_name="resneta10", **kwargs)
    return net
def resnetabc14b(**kwargs):
    """
    ResNet(A)-BC-14b model (average downsampling, compressed bottleneck,
    stride on the second bottleneck convolution) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385. Experimental.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs)
    return net
def resneta18(**kwargs):
    """
    ResNet(A)-18 model (average downsampling) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=18, model_name="resneta18", **kwargs)
    return net
def resneta50b(**kwargs):
    """
    ResNet(A)-50 model (average downsampling, stride on the second bottleneck
    convolution) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs)
    return net
def resneta101b(**kwargs):
    """
    ResNet(A)-101 model (average downsampling, stride on the second bottleneck
    convolution) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs)
    return net
def resneta152b(**kwargs):
    """
    ResNet(A)-152 model (average downsampling, stride on the second bottleneck
    convolution) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs)
    return net
def _test():
    """
    Smoke test: build each ResNet(A) model, run a forward pass on a random
    batch, and check the output shape and trainable parameter count.
    """
    import numpy as np
    import tensorflow.keras.backend as K

    data_format = "channels_last"
    # data_format = "channels_first"
    pretrained = False

    # Model constructor -> expected number of trainable parameters.
    expected_weight_counts = {
        resneta10: 5438024,
        resnetabc14b: 10084168,
        resneta18: 11708744,
        resneta50b: 25576264,
        resneta101b: 44568392,
        resneta152b: 60212040,
    }

    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)

        batch = 14
        if is_channels_first(data_format):
            x = tf.random.normal((batch, 3, 224, 224))
        else:
            x = tf.random.normal((batch, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch, 1000))

        weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)


if __name__ == "__main__":
    _test()
| 15,634 | 33.667406 | 115 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.