Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- testbed/huggingface__datasets/.gitignore +67 -0
- testbed/huggingface__datasets/setup.cfg +10 -0
- testbed/huggingface__huggingface_hub/.github/workflows/python-release.yml +33 -0
- testbed/huggingface__huggingface_hub/Makefile +18 -0
- testbed/huggingface__pytorch-image-models/.gitattributes +1 -0
- testbed/huggingface__pytorch-image-models/.github/FUNDING.yml +2 -0
- testbed/huggingface__pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md +32 -0
- testbed/huggingface__pytorch-image-models/.github/workflows/tests.yml +51 -0
- testbed/huggingface__pytorch-image-models/.gitignore +108 -0
- testbed/huggingface__pytorch-image-models/LICENSE +201 -0
- testbed/huggingface__pytorch-image-models/MANIFEST.in +2 -0
- testbed/huggingface__pytorch-image-models/README.md +423 -0
- testbed/huggingface__pytorch-image-models/avg_checkpoints.py +117 -0
- testbed/huggingface__pytorch-image-models/benchmark.py +481 -0
- testbed/huggingface__pytorch-image-models/clean_checkpoint.py +82 -0
- testbed/huggingface__pytorch-image-models/convert/convert_nest_flax.py +109 -0
- testbed/huggingface__pytorch-image-models/distributed_train.sh +5 -0
- testbed/huggingface__pytorch-image-models/docs/archived_changes.md +205 -0
- testbed/huggingface__pytorch-image-models/docs/changes.md +130 -0
- testbed/huggingface__pytorch-image-models/docs/feature_extraction.md +173 -0
- testbed/huggingface__pytorch-image-models/docs/index.md +80 -0
- testbed/huggingface__pytorch-image-models/docs/models.md +171 -0
- testbed/huggingface__pytorch-image-models/docs/models/.templates/models/advprop.md +457 -0
- testbed/huggingface__pytorch-image-models/docs/models/.templates/models/big-transfer.md +295 -0
- testbed/huggingface__pytorch-image-models/docs/models/.templates/models/csp-darknet.md +81 -0
- testbed/huggingface__pytorch-image-models/docs/models/.templates/models/dpn.md +256 -0
- testbed/huggingface__pytorch-image-models/docs/models/gloun-senet.md +124 -0
- testbed/huggingface__pytorch-image-models/docs/models/gloun-seresnext.md +197 -0
- testbed/huggingface__pytorch-image-models/docs/models/inception-resnet-v2.md +133 -0
- testbed/huggingface__pytorch-image-models/docs/models/inception-v3.md +146 -0
- testbed/huggingface__pytorch-image-models/docs/models/legacy-se-resnet.md +318 -0
- testbed/huggingface__pytorch-image-models/docs/models/legacy-senet.md +135 -0
- testbed/huggingface__pytorch-image-models/docs/models/mobilenet-v2.md +271 -0
- testbed/huggingface__pytorch-image-models/docs/models/nasnet.md +131 -0
- testbed/huggingface__pytorch-image-models/docs/models/noisy-student.md +571 -0
- testbed/huggingface__pytorch-image-models/docs/models/pnasnet.md +132 -0
- testbed/huggingface__pytorch-image-models/docs/models/res2net.md +321 -0
- testbed/huggingface__pytorch-image-models/docs/models/resnest.md +469 -0
- testbed/huggingface__pytorch-image-models/docs/models/resnet-d.md +324 -0
- testbed/huggingface__pytorch-image-models/docs/models/resnet.md +439 -0
- testbed/huggingface__pytorch-image-models/docs/models/resnext.md +244 -0
- testbed/huggingface__pytorch-image-models/docs/models/rexnet.md +258 -0
- testbed/huggingface__pytorch-image-models/docs/models/se-resnet.md +183 -0
- testbed/huggingface__pytorch-image-models/docs/models/selecsls.md +197 -0
- testbed/huggingface__pytorch-image-models/docs/models/seresnext.md +228 -0
- testbed/huggingface__pytorch-image-models/docs/models/skresnet.md +173 -0
- testbed/huggingface__pytorch-image-models/docs/models/skresnext.md +131 -0
- testbed/huggingface__pytorch-image-models/docs/models/spnasnet.md +123 -0
- testbed/huggingface__pytorch-image-models/docs/models/ssl-resnet.md +192 -0
- testbed/huggingface__pytorch-image-models/docs/models/ssl-resnext.md +278 -0
testbed/huggingface__datasets/.gitignore
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Locked files
|
| 2 |
+
*.lock
|
| 3 |
+
!dvc.lock
|
| 4 |
+
|
| 5 |
+
# Extracted dummy data
|
| 6 |
+
datasets/**/dummy_data-zip-extracted/
|
| 7 |
+
|
| 8 |
+
# Compiled python modules.
|
| 9 |
+
*.pyc
|
| 10 |
+
|
| 11 |
+
# Byte-compiled
|
| 12 |
+
__pycache__/
|
| 13 |
+
.cache/
|
| 14 |
+
|
| 15 |
+
# Python egg metadata, regenerated from source files by setuptools.
|
| 16 |
+
*.egg-info
|
| 17 |
+
.eggs/
|
| 18 |
+
|
| 19 |
+
# PyPI distribution artifacts.
|
| 20 |
+
build/
|
| 21 |
+
dist/
|
| 22 |
+
|
| 23 |
+
# Environments
|
| 24 |
+
.env
|
| 25 |
+
.venv
|
| 26 |
+
env/
|
| 27 |
+
venv/
|
| 28 |
+
ENV/
|
| 29 |
+
env.bak/
|
| 30 |
+
venv.bak/
|
| 31 |
+
|
| 32 |
+
# pyenv
|
| 33 |
+
.python-version
|
| 34 |
+
|
| 35 |
+
# Tests
|
| 36 |
+
.pytest_cache/
|
| 37 |
+
|
| 38 |
+
# Other
|
| 39 |
+
*.DS_Store
|
| 40 |
+
|
| 41 |
+
# PyCharm/vscode
|
| 42 |
+
.idea
|
| 43 |
+
.vscode
|
| 44 |
+
|
| 45 |
+
# keep only the empty datasets and metrics directory with its __init__.py file
|
| 46 |
+
/src/*/datasets/*
|
| 47 |
+
!/src/*/datasets/__init__.py
|
| 48 |
+
|
| 49 |
+
/src/*/metrics/*
|
| 50 |
+
!/src/*/metrics/__init__.py
|
| 51 |
+
|
| 52 |
+
# Vim
|
| 53 |
+
.*.swp
|
| 54 |
+
|
| 55 |
+
# playground
|
| 56 |
+
/playground
|
| 57 |
+
|
| 58 |
+
# Sphinx documentation
|
| 59 |
+
docs/_build/
|
| 60 |
+
docs/source/_build/
|
| 61 |
+
|
| 62 |
+
# Benchmark results
|
| 63 |
+
report.json
|
| 64 |
+
report.md
|
| 65 |
+
|
| 66 |
+
# Ruff
|
| 67 |
+
.ruff_cache
|
testbed/huggingface__datasets/setup.cfg
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[metadata]
|
| 2 |
+
license_files = LICENSE
|
| 3 |
+
|
| 4 |
+
[tool:pytest]
|
| 5 |
+
# Test fails if a FutureWarning is thrown by `huggingface_hub`
|
| 6 |
+
filterwarnings =
|
| 7 |
+
error::FutureWarning:huggingface_hub*
|
| 8 |
+
markers =
|
| 9 |
+
unit: unit test
|
| 10 |
+
integration: integration test
|
testbed/huggingface__huggingface_hub/.github/workflows/python-release.yml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Python release
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
tags:
|
| 6 |
+
- v*
|
| 7 |
+
|
| 8 |
+
env:
|
| 9 |
+
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN_DIST }}
|
| 10 |
+
|
| 11 |
+
jobs:
|
| 12 |
+
python_release:
|
| 13 |
+
runs-on: ubuntu-latest
|
| 14 |
+
|
| 15 |
+
steps:
|
| 16 |
+
- uses: actions/checkout@v2
|
| 17 |
+
- name: Set up Python
|
| 18 |
+
uses: actions/setup-python@v2
|
| 19 |
+
with:
|
| 20 |
+
python-version: 3.9
|
| 21 |
+
- name: Install dependencies
|
| 22 |
+
run: |
|
| 23 |
+
pip install --upgrade pip
|
| 24 |
+
pip install setuptools wheel
|
| 25 |
+
|
| 26 |
+
- run: python setup.py sdist bdist_wheel
|
| 27 |
+
|
| 28 |
+
- run: |
|
| 29 |
+
pip install twine
|
| 30 |
+
|
| 31 |
+
- name: Upload to PyPi
|
| 32 |
+
run: |
|
| 33 |
+
twine upload dist/* -u __token__ -p "$PYPI_TOKEN"
|
testbed/huggingface__huggingface_hub/Makefile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.PHONY: quality style test
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
check_dirs := tests src
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
quality:
|
| 8 |
+
black --check $(check_dirs)
|
| 9 |
+
isort --check-only $(check_dirs)
|
| 10 |
+
flake8 $(check_dirs)
|
| 11 |
+
|
| 12 |
+
style:
|
| 13 |
+
black $(check_dirs)
|
| 14 |
+
isort $(check_dirs)
|
| 15 |
+
|
| 16 |
+
test:
|
| 17 |
+
HUGGINGFACE_CO_STAGING=1 pytest -sv ./tests/
|
| 18 |
+
|
testbed/huggingface__pytorch-image-models/.gitattributes
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
*.ipynb linguist-documentation
|
testbed/huggingface__pytorch-image-models/.github/FUNDING.yml
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# These are supported funding model platforms
|
| 2 |
+
github: rwightman
|
testbed/huggingface__pytorch-image-models/.github/ISSUE_TEMPLATE/bug_report.md
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
name: Bug report
|
| 3 |
+
about: Create a bug report to help us improve. Issues are for reporting bugs or requesting
|
| 4 |
+
features, the discussion forum is available for asking questions or seeking help
|
| 5 |
+
from the community.
|
| 6 |
+
title: "[BUG] Issue title..."
|
| 7 |
+
labels: bug
|
| 8 |
+
assignees: rwightman
|
| 9 |
+
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
**Describe the bug**
|
| 13 |
+
A clear and concise description of what the bug is.
|
| 14 |
+
|
| 15 |
+
**To Reproduce**
|
| 16 |
+
Steps to reproduce the behavior:
|
| 17 |
+
1.
|
| 18 |
+
2.
|
| 19 |
+
|
| 20 |
+
**Expected behavior**
|
| 21 |
+
A clear and concise description of what you expected to happen.
|
| 22 |
+
|
| 23 |
+
**Screenshots**
|
| 24 |
+
If applicable, add screenshots to help explain your problem.
|
| 25 |
+
|
| 26 |
+
**Desktop (please complete the following information):**
|
| 27 |
+
- OS: [e.g. Windows 10, Ubuntu 18.04]
|
| 28 |
+
- This repository version [e.g. pip 0.3.1 or commit ref]
|
| 29 |
+
- PyTorch version w/ CUDA/cuDNN [e.g. from `conda list`, 1.7.0 py3.8_cuda11.0.221_cudnn8.0.3_0]
|
| 30 |
+
|
| 31 |
+
**Additional context**
|
| 32 |
+
Add any other context about the problem here.
|
testbed/huggingface__pytorch-image-models/.github/workflows/tests.yml
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Python tests
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [ master ]
|
| 6 |
+
pull_request:
|
| 7 |
+
branches: [ master ]
|
| 8 |
+
|
| 9 |
+
env:
|
| 10 |
+
OMP_NUM_THREADS: 2
|
| 11 |
+
MKL_NUM_THREADS: 2
|
| 12 |
+
|
| 13 |
+
jobs:
|
| 14 |
+
test:
|
| 15 |
+
name: Run tests on ${{ matrix.os }} with Python ${{ matrix.python }}
|
| 16 |
+
strategy:
|
| 17 |
+
matrix:
|
| 18 |
+
os: [ubuntu-latest, macOS-latest]
|
| 19 |
+
python: ['3.8']
|
| 20 |
+
torch: ['1.9.0']
|
| 21 |
+
torchvision: ['0.10.0']
|
| 22 |
+
runs-on: ${{ matrix.os }}
|
| 23 |
+
|
| 24 |
+
steps:
|
| 25 |
+
- uses: actions/checkout@v2
|
| 26 |
+
- name: Set up Python ${{ matrix.python }}
|
| 27 |
+
uses: actions/setup-python@v1
|
| 28 |
+
with:
|
| 29 |
+
python-version: ${{ matrix.python }}
|
| 30 |
+
- name: Install testing dependencies
|
| 31 |
+
run: |
|
| 32 |
+
python -m pip install --upgrade pip
|
| 33 |
+
pip install pytest pytest-timeout
|
| 34 |
+
- name: Install torch on mac
|
| 35 |
+
if: startsWith(matrix.os, 'macOS')
|
| 36 |
+
run: pip install --no-cache-dir torch==${{ matrix.torch }} torchvision==${{ matrix.torchvision }}
|
| 37 |
+
- name: Install torch on ubuntu
|
| 38 |
+
if: startsWith(matrix.os, 'ubuntu')
|
| 39 |
+
run: |
|
| 40 |
+
pip install --no-cache-dir torch==${{ matrix.torch }}+cpu torchvision==${{ matrix.torchvision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
| 41 |
+
sudo apt update
|
| 42 |
+
sudo apt install -y google-perftools
|
| 43 |
+
- name: Install requirements
|
| 44 |
+
run: |
|
| 45 |
+
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
| 46 |
+
pip install --no-cache-dir git+https://github.com/mapillary/inplace_abn.git@v1.1.0
|
| 47 |
+
- name: Run tests
|
| 48 |
+
env:
|
| 49 |
+
LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
|
| 50 |
+
run: |
|
| 51 |
+
pytest -vv --durations=0 ./tests
|
testbed/huggingface__pytorch-image-models/.gitignore
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# PyInstaller
|
| 29 |
+
# Usually these files are written by a python script from a template
|
| 30 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 31 |
+
*.manifest
|
| 32 |
+
*.spec
|
| 33 |
+
|
| 34 |
+
# Installer logs
|
| 35 |
+
pip-log.txt
|
| 36 |
+
pip-delete-this-directory.txt
|
| 37 |
+
|
| 38 |
+
# Unit test / coverage reports
|
| 39 |
+
htmlcov/
|
| 40 |
+
.tox/
|
| 41 |
+
.coverage
|
| 42 |
+
.coverage.*
|
| 43 |
+
.cache
|
| 44 |
+
nosetests.xml
|
| 45 |
+
coverage.xml
|
| 46 |
+
*.cover
|
| 47 |
+
.hypothesis/
|
| 48 |
+
.pytest_cache/
|
| 49 |
+
|
| 50 |
+
# Translations
|
| 51 |
+
*.mo
|
| 52 |
+
*.pot
|
| 53 |
+
|
| 54 |
+
# Django stuff:
|
| 55 |
+
*.log
|
| 56 |
+
local_settings.py
|
| 57 |
+
db.sqlite3
|
| 58 |
+
|
| 59 |
+
# Flask stuff:
|
| 60 |
+
instance/
|
| 61 |
+
.webassets-cache
|
| 62 |
+
|
| 63 |
+
# Scrapy stuff:
|
| 64 |
+
.scrapy
|
| 65 |
+
|
| 66 |
+
# Sphinx documentation
|
| 67 |
+
docs/_build/
|
| 68 |
+
|
| 69 |
+
# PyBuilder
|
| 70 |
+
target/
|
| 71 |
+
|
| 72 |
+
# Jupyter Notebook
|
| 73 |
+
.ipynb_checkpoints
|
| 74 |
+
|
| 75 |
+
# pyenv
|
| 76 |
+
.python-version
|
| 77 |
+
|
| 78 |
+
# celery beat schedule file
|
| 79 |
+
celerybeat-schedule
|
| 80 |
+
|
| 81 |
+
# SageMath parsed files
|
| 82 |
+
*.sage.py
|
| 83 |
+
|
| 84 |
+
# Environments
|
| 85 |
+
.env
|
| 86 |
+
.venv
|
| 87 |
+
env/
|
| 88 |
+
venv/
|
| 89 |
+
ENV/
|
| 90 |
+
env.bak/
|
| 91 |
+
venv.bak/
|
| 92 |
+
|
| 93 |
+
# Spyder project settings
|
| 94 |
+
.spyderproject
|
| 95 |
+
.spyproject
|
| 96 |
+
|
| 97 |
+
# Rope project settings
|
| 98 |
+
.ropeproject
|
| 99 |
+
|
| 100 |
+
# PyCharm
|
| 101 |
+
.idea
|
| 102 |
+
|
| 103 |
+
# PyTorch weights
|
| 104 |
+
*.tar
|
| 105 |
+
*.pth
|
| 106 |
+
*.gz
|
| 107 |
+
Untitled.ipynb
|
| 108 |
+
Testing notebook.ipynb
|
testbed/huggingface__pytorch-image-models/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "{}"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2019 Ross Wightman
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
testbed/huggingface__pytorch-image-models/MANIFEST.in
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include timm/models/pruned/*.txt
|
| 2 |
+
|
testbed/huggingface__pytorch-image-models/README.md
ADDED
|
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PyTorch Image Models
|
| 2 |
+
- [Sponsors](#sponsors)
|
| 3 |
+
- [What's New](#whats-new)
|
| 4 |
+
- [Introduction](#introduction)
|
| 5 |
+
- [Models](#models)
|
| 6 |
+
- [Features](#features)
|
| 7 |
+
- [Results](#results)
|
| 8 |
+
- [Getting Started (Documentation)](#getting-started-documentation)
|
| 9 |
+
- [Train, Validation, Inference Scripts](#train-validation-inference-scripts)
|
| 10 |
+
- [Awesome PyTorch Resources](#awesome-pytorch-resources)
|
| 11 |
+
- [Licenses](#licenses)
|
| 12 |
+
- [Citing](#citing)
|
| 13 |
+
|
| 14 |
+
## Sponsors
|
| 15 |
+
|
| 16 |
+
A big thank you to my [GitHub Sponsors](https://github.com/sponsors/rwightman) for their support!
|
| 17 |
+
|
| 18 |
+
In addition to the sponsors at the link above, I've received hardware and/or cloud resources from
|
| 19 |
+
* Nvidia (https://www.nvidia.com/en-us/)
|
| 20 |
+
* TFRC (https://www.tensorflow.org/tfrc)
|
| 21 |
+
|
| 22 |
+
I'm fortunate to be able to dedicate significant time and money of my own supporting this and other open source projects. However, as the projects increase in scope, outside support is needed to continue with the current trajectory of hardware, infrastructure, and electricity costs.
|
| 23 |
+
|
| 24 |
+
## What's New
|
| 25 |
+
|
| 26 |
+
### Aug 18, 2021
|
| 27 |
+
* Optimizer bonanza!
|
| 28 |
+
* Add LAMB and LARS optimizers, incl trust ratio clipping options. Tweaked to work properly in PyTorch XLA (tested on TPUs w/ `timm bits` [branch](https://github.com/rwightman/pytorch-image-models/tree/bits_and_tpu/timm/bits))
|
| 29 |
+
* Add MADGRAD from FB research w/ a few tweaks (decoupled decay option, step handling that works with PyTorch XLA)
|
| 30 |
+
* Some cleanup on all optimizers and factory. No more `.data`, a bit more consistency, unit tests for all!
|
| 31 |
+
* SGDP and AdamP still won't work with PyTorch XLA but others should (have yet to test Adabelief, Adafactor, Adahessian myself).
|
| 32 |
+
* EfficientNet-V2 XL TF ported weights added, but they don't validate well in PyTorch (L is better). The pre-processing for the V2 TF training is a bit diff and the fine-tuned 21k -> 1k weights are very sensitive and less robust than the 1k weights.
|
| 33 |
+
* Added PyTorch trained EfficientNet-V2 'Tiny' w/ GlobalContext attn weights. Only .1-.2 top-1 better than the SE so more of a curiosity for those interested.
|
| 34 |
+
|
| 35 |
+
### July 12, 2021
|
| 36 |
+
* Add XCiT models from [official facebook impl](https://github.com/facebookresearch/xcit). Contributed by [Alexander Soare](https://github.com/alexander-soare)
|
| 37 |
+
|
| 38 |
+
### July 5-9, 2021
|
| 39 |
+
* Add `efficientnetv2_rw_t` weights, a custom 'tiny' 13.6M param variant that is a bit better than (non NoisyStudent) B3 models. Both faster and better accuracy (at same or lower res)
|
| 40 |
+
* top-1 82.34 @ 288x288 and 82.54 @ 320x320
|
| 41 |
+
* Add [SAM pretrained](https://arxiv.org/abs/2106.01548) in1k weight for ViT B/16 (`vit_base_patch16_sam_224`) and B/32 (`vit_base_patch32_sam_224`) models.
|
| 42 |
+
* Add 'Aggregating Nested Transformer' (NesT) w/ weights converted from official [Flax impl](https://github.com/google-research/nested-transformer). Contributed by [Alexander Soare](https://github.com/alexander-soare).
|
| 43 |
+
* `jx_nest_base` - 83.534, `jx_nest_small` - 83.120, `jx_nest_tiny` - 81.426
|
| 44 |
+
|
| 45 |
+
### June 23, 2021
|
| 46 |
+
* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050). Hparams for this and other recent MLP training [here](https://gist.github.com/rwightman/d6c264a9001f9167e06c209f630b2cc6)
|
| 47 |
+
|
| 48 |
+
### June 20, 2021
|
| 49 |
+
* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
|
| 50 |
+
* .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg)
|
| 51 |
+
* See [example notebook](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb) from [official impl](https://github.com/google-research/vision_transformer/) for navigating the augreg weights
|
| 52 |
+
* Replaced all default weights w/ best AugReg variant (if possible). All AugReg 21k classifiers work.
|
| 53 |
+
* Highlights: `vit_large_patch16_384` (87.1 top-1), `vit_large_r50_s32_384` (86.2 top-1), `vit_base_patch16_384` (86.0 top-1)
|
| 54 |
+
* `vit_deit_*` renamed to just `deit_*`
|
| 55 |
+
* Remove my old small model, replace with DeiT compatible small w/ AugReg weights
|
| 56 |
+
* Add 1st training of my `gmixer_24_224` MLP /w GLU, 78.1 top-1 w/ 25M params.
|
| 57 |
+
* Add weights from official ResMLP release (https://github.com/facebookresearch/deit)
|
| 58 |
+
* Add `eca_nfnet_l2` weights from my 'lightweight' series. 84.7 top-1 at 384x384.
|
| 59 |
+
* Add distilled BiT 50x1 student and 152x2 Teacher weights from [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237)
|
| 60 |
+
* NFNets and ResNetV2-BiT models work w/ Pytorch XLA now
|
| 61 |
+
* weight standardization uses F.batch_norm instead of std_mean (std_mean wasn't lowered)
|
| 62 |
+
* eps values adjusted, will be slight differences but should be quite close
|
| 63 |
+
* Improve test coverage and classifier interface of non-conv (vision transformer and mlp) models
|
| 64 |
+
* Cleanup a few classifier / flatten details for models w/ conv classifiers or early global pool
|
| 65 |
+
* Please report any regressions, this PR touched quite a few models.
|
| 66 |
+
|
| 67 |
+
### June 8, 2021
|
| 68 |
+
* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1.
|
| 69 |
+
* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1.
|
| 70 |
+
* NFNet inspired block layout with quad layer stem and no maxpool
|
| 71 |
+
* Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288
|
| 72 |
+
|
| 73 |
+
### May 25, 2021
|
| 74 |
+
* Add LeViT, Visformer, ConViT (PR by Aman Arora), Twins (PR by paper authors) transformer models
|
| 75 |
+
* Add ResMLP and gMLP MLP vision models to the existing MLP Mixer impl
|
| 76 |
+
* Fix a number of torchscript issues with various vision transformer models
|
| 77 |
+
* Cleanup input_size/img_size override handling and improve testing / test coverage for all vision transformer and MLP models
|
| 78 |
+
* More flexible pos embedding resize (non-square) for ViT and TnT. Thanks [Alexander Soare](https://github.com/alexander-soare)
|
| 79 |
+
* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params.
|
| 80 |
+
|
| 81 |
+
### May 14, 2021
|
| 82 |
+
* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
|
| 83 |
+
* 1k trained variants: `tf_efficientnetv2_s/m/l`
|
| 84 |
+
* 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
|
| 85 |
+
* 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
|
| 86 |
+
* v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
|
| 87 |
+
* Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
|
| 88 |
+
* Some blank `efficientnetv2_*` models in-place for future native PyTorch training
|
| 89 |
+
|
| 90 |
+
### May 5, 2021
|
| 91 |
+
* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
|
| 92 |
+
* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
|
| 93 |
+
* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora)
|
| 94 |
+
* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin)
|
| 95 |
+
* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23)
|
| 96 |
+
* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai)
|
| 97 |
+
* Update ByoaNet attention modules
|
| 98 |
+
* Improve SA module inits
|
| 99 |
+
* Hack together experimental stand-alone Swin based attn module and `swinnet`
|
| 100 |
+
* Consistent '26t' model defs for experiments.
|
| 101 |
+
* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1.
|
| 102 |
+
* WandB logging support
|
| 103 |
+
|
| 104 |
+
### April 13, 2021
|
| 105 |
+
* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer
|
| 106 |
+
|
| 107 |
+
### April 12, 2021
|
| 108 |
+
* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256.
|
| 109 |
+
* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
|
| 110 |
+
* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs
|
| 111 |
+
* Lambda Networks - https://arxiv.org/abs/2102.08602
|
| 112 |
+
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
|
| 113 |
+
* Halo Nets - https://arxiv.org/abs/2103.12731
|
| 114 |
+
* Adabelief optimizer contributed by Juntang Zhuang
|
| 115 |
+
|
| 116 |
+
### April 1, 2021
|
| 117 |
+
* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference
|
| 118 |
+
* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit)
|
| 119 |
+
* Merged distilled variant into main for torchscript compatibility
|
| 120 |
+
* Some `timm` cleanup/style tweaks and weights have hub download support
|
| 121 |
+
* Cleanup Vision Transformer (ViT) models
|
| 122 |
+
* Merge distilled (DeiT) model into main so that torchscript can work
|
| 123 |
+
* Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch)
|
| 124 |
+
* Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids
|
| 125 |
+
* Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants
|
| 126 |
+
* nn.Sequential for block stack (does not break downstream compat)
|
| 127 |
+
* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT)
|
| 128 |
+
* Add RegNetY-160 weights from DeiT teacher model
|
| 129 |
+
* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288
|
| 130 |
+
* Some fixes/improvements for TFDS dataset wrapper
|
| 131 |
+
|
| 132 |
+
### March 17, 2021
|
| 133 |
+
* Add new ECA-NFNet-L0 (rename `nfnet_l0c`->`eca_nfnet_l0`) weights trained by myself.
|
| 134 |
+
* 82.6 top-1 @ 288x288, 82.8 @ 320x320, trained at 224x224
|
| 135 |
+
* Uses SiLU activation, approx 2x faster than `dm_nfnet_f0` and 50% faster than `nfnet_f0s` w/ 1/3 param count
|
| 136 |
+
* Integrate [Hugging Face model hub](https://huggingface.co/models) into timm create_model and default_cfg handling for pretrained weight and config sharing (more on this soon!)
|
| 137 |
+
* Merge HardCoRe NAS models contributed by https://github.com/yoniaflalo
|
| 138 |
+
* Merge PyTorch trained EfficientNet-EL and pruned ES/EL variants contributed by [DeGirum](https://github.com/DeGirum)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
### March 7, 2021
|
| 142 |
+
* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc).
|
| 143 |
+
* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation.
|
| 144 |
+
* Tested with PyTorch 1.8 release. Updated CI to use 1.8.
|
| 145 |
+
* Benchmarked several arch on RTX 3090, Titan RTX, and V100 across 1.7.1, 1.8, NGC 20.12, and 21.02. Some interesting performance variations to take note of https://gist.github.com/rwightman/bb59f9e245162cee0e38bd66bd8cd77f
|
| 146 |
+
|
| 147 |
+
### Feb 18, 2021
|
| 148 |
+
* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets).
|
| 149 |
+
* Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn.
|
| 150 |
+
  * These models are big, expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants.
|
| 151 |
+
* Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated).
|
| 152 |
+
* Matching the original pre-processing as closely as possible I get these results:
|
| 153 |
+
* `dm_nfnet_f6` - 86.352
|
| 154 |
+
* `dm_nfnet_f5` - 86.100
|
| 155 |
+
* `dm_nfnet_f4` - 85.834
|
| 156 |
+
* `dm_nfnet_f3` - 85.676
|
| 157 |
+
* `dm_nfnet_f2` - 85.178
|
| 158 |
+
* `dm_nfnet_f1` - 84.696
|
| 159 |
+
* `dm_nfnet_f0` - 83.464
|
| 160 |
+
|
| 161 |
+
### Feb 16, 2021
|
| 162 |
+
* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py.
|
| 163 |
+
* AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc`
|
| 164 |
+
* PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0`
|
| 165 |
+
* PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value`
|
| 166 |
+
* AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet.
|
| 167 |
+
|
| 168 |
+
### Feb 12, 2021
|
| 169 |
+
* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs
|
| 170 |
+
|
| 171 |
+
### Feb 10, 2021
|
| 172 |
+
* First Normalization-Free model training experiments done:
|
| 173 |
+
* nf_resnet50 - 80.68 top-1 @ 288x288, 80.31 @ 256x256
|
| 174 |
+
* nf_regnet_b1 - 79.30 @ 288x288, 78.75 @ 256x256
|
| 175 |
+
* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks')
|
| 176 |
+
* GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py`
|
| 177 |
+
* RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py`
|
| 178 |
+
* classic VGG (from torchvision, impl in `vgg.py`)
|
| 179 |
+
* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models
|
| 180 |
+
* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not.
|
| 181 |
+
* Fix a few bugs introduced since last pypi release
|
| 182 |
+
|
| 183 |
+
### Feb 8, 2021
|
| 184 |
+
* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352.
|
| 185 |
+
* `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256
|
| 186 |
+
* `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256
|
| 187 |
+
* `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320
|
| 188 |
+
* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed).
|
| 189 |
+
* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test.
|
| 190 |
+
|
| 191 |
+
### Jan 30, 2021
|
| 192 |
+
* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)
|
| 193 |
+
|
| 194 |
+
### Jan 25, 2021
|
| 195 |
+
* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer
|
| 196 |
+
* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer
|
| 197 |
+
* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support
|
| 198 |
+
* NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning
|
| 199 |
+
* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit
|
| 200 |
+
* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes
|
| 201 |
+
* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script
|
| 202 |
+
* Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2`
|
| 203 |
+
* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar
|
| 204 |
+
* Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp`
|
| 205 |
+
* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling
|
| 206 |
+
|
| 207 |
+
### Jan 3, 2021
|
| 208 |
+
* Add SE-ResNet-152D weights
|
| 209 |
+
* 256x256 val, 0.94 crop top-1 - 83.75
|
| 210 |
+
* 320x320 val, 1.0 crop - 84.36
|
| 211 |
+
* Update [results files](results/)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
## Introduction
|
| 215 |
+
|
| 216 |
+
Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results.
|
| 217 |
+
|
| 218 |
+
The work of many others is present here. I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything.
|
| 219 |
+
|
| 220 |
+
## Models
|
| 221 |
+
|
| 222 |
+
All model architecture families include variants with pretrained weights. There are specific model variants without any weights, it is NOT a bug. Help training new or better weights is always appreciated. Here are some example [training hparams](https://rwightman.github.io/pytorch-image-models/training_hparam_examples) to get you started.
|
| 223 |
+
|
| 224 |
+
A full version of the list below with source links can be found in the [documentation](https://rwightman.github.io/pytorch-image-models/models/).
|
| 225 |
+
|
| 226 |
+
* Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723
|
| 227 |
+
* Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370
|
| 228 |
+
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
|
| 229 |
+
* CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239
|
| 230 |
+
* CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399
|
| 231 |
+
* ConViT (Soft Convolutional Inductive Biases Vision Transformers) - https://arxiv.org/abs/2103.10697
|
| 232 |
+
* CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929
|
| 233 |
+
* DeiT (Vision Transformer) - https://arxiv.org/abs/2012.12877
|
| 234 |
+
* DenseNet - https://arxiv.org/abs/1608.06993
|
| 235 |
+
* DLA - https://arxiv.org/abs/1707.06484
|
| 236 |
+
* DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629
|
| 237 |
+
* EfficientNet (MBConvNet Family)
|
| 238 |
+
* EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
|
| 239 |
+
* EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
|
| 240 |
+
* EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
|
| 241 |
+
* EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
|
| 242 |
+
* EfficientNet V2 - https://arxiv.org/abs/2104.00298
|
| 243 |
+
* FBNet-C - https://arxiv.org/abs/1812.03443
|
| 244 |
+
* MixNet - https://arxiv.org/abs/1907.09595
|
| 245 |
+
* MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
|
| 246 |
+
* MobileNet-V2 - https://arxiv.org/abs/1801.04381
|
| 247 |
+
* Single-Path NAS - https://arxiv.org/abs/1904.02877
|
| 248 |
+
* GhostNet - https://arxiv.org/abs/1911.11907
|
| 249 |
+
* gMLP - https://arxiv.org/abs/2105.08050
|
| 250 |
+
* GPU-Efficient Networks - https://arxiv.org/abs/2006.14090
|
| 251 |
+
* Halo Nets - https://arxiv.org/abs/2103.12731
|
| 252 |
+
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646
|
| 253 |
+
* HRNet - https://arxiv.org/abs/1908.07919
|
| 254 |
+
* Inception-V3 - https://arxiv.org/abs/1512.00567
|
| 255 |
+
* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
|
| 256 |
+
* Lambda Networks - https://arxiv.org/abs/2102.08602
|
| 257 |
+
* LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136
|
| 258 |
+
* MLP-Mixer - https://arxiv.org/abs/2105.01601
|
| 259 |
+
* MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244
|
| 260 |
+
* NASNet-A - https://arxiv.org/abs/1707.07012
|
| 261 |
+
* NFNet-F - https://arxiv.org/abs/2102.06171
|
| 262 |
+
* NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692
|
| 263 |
+
* PNasNet - https://arxiv.org/abs/1712.00559
|
| 264 |
+
* Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302
|
| 265 |
+
* RegNet - https://arxiv.org/abs/2003.13678
|
| 266 |
+
* RepVGG - https://arxiv.org/abs/2101.03697
|
| 267 |
+
* ResMLP - https://arxiv.org/abs/2105.03404
|
| 268 |
+
* ResNet/ResNeXt
|
| 269 |
+
* ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385
|
| 270 |
+
* ResNeXt - https://arxiv.org/abs/1611.05431
|
| 271 |
+
* 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187
|
| 272 |
+
* Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932
|
| 273 |
+
* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546
|
| 274 |
+
* ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4
|
| 275 |
+
* Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507
|
| 276 |
+
* ResNet-RS - https://arxiv.org/abs/2103.07579
|
| 277 |
+
* Res2Net - https://arxiv.org/abs/1904.01169
|
| 278 |
+
* ResNeSt - https://arxiv.org/abs/2004.08955
|
| 279 |
+
* ReXNet - https://arxiv.org/abs/2007.00992
|
| 280 |
+
* SelecSLS - https://arxiv.org/abs/1907.00837
|
| 281 |
+
* Selective Kernel Networks - https://arxiv.org/abs/1903.06586
|
| 282 |
+
* Swin Transformer - https://arxiv.org/abs/2103.14030
|
| 283 |
+
* Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112
|
| 284 |
+
* TResNet - https://arxiv.org/abs/2003.13630
|
| 285 |
+
* Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf
|
| 286 |
+
* Vision Transformer - https://arxiv.org/abs/2010.11929
|
| 287 |
+
* VovNet V2 and V1 - https://arxiv.org/abs/1911.06667
|
| 288 |
+
* Xception - https://arxiv.org/abs/1610.02357
|
| 289 |
+
* Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611
|
| 290 |
+
* Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611
|
| 291 |
+
* XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681
|
| 292 |
+
|
| 293 |
+
## Features
|
| 294 |
+
|
| 295 |
+
Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP:
|
| 296 |
+
|
| 297 |
+
* All models have a common default configuration interface and API for
|
| 298 |
+
* accessing/changing the classifier - `get_classifier` and `reset_classifier`
|
| 299 |
+
* doing a forward pass on just the features - `forward_features` (see [documentation](https://rwightman.github.io/pytorch-image-models/feature_extraction/))
|
| 300 |
+
  * these make it easy to write consistent network wrappers that work with any of the models
|
| 301 |
+
* All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://rwightman.github.io/pytorch-image-models/feature_extraction/))
|
| 302 |
+
* `create_model(name, features_only=True, out_indices=..., output_stride=...)`
|
| 303 |
+
* `out_indices` creation arg specifies which feature maps to return, these indices are 0 based and generally correspond to the `C(i + 1)` feature level.
|
| 304 |
+
* `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this.
|
| 305 |
+
* feature map channel counts, reduction level (stride) can be queried AFTER model creation via the `.feature_info` member
|
| 306 |
+
* All models have a consistent pretrained weight loader that adapts last linear if necessary, and from 3 to 1 channel input if desired
|
| 307 |
+
* High performance [reference training, validation, and inference scripts](https://rwightman.github.io/pytorch-image-models/scripts/) that work in several process/GPU modes:
|
| 308 |
+
* NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional)
|
| 309 |
+
* PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled)
|
| 310 |
+
* PyTorch w/ single GPU single process (AMP optional)
|
| 311 |
+
* A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. All global pooling is adaptive average by default and compatible with pretrained weights.
|
| 312 |
+
* A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance doing inference with input images larger than the training size. Idea adapted from original DPN implementation when I ported (https://github.com/cypw/DPNs)
|
| 313 |
+
* Learning rate schedulers
|
| 314 |
+
* Ideas adopted from
|
| 315 |
+
* [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers)
|
| 316 |
+
* [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler)
|
| 317 |
+
* SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983)
|
| 318 |
+
* Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau`
|
| 319 |
+
* Optimizers:
|
| 320 |
+
* `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour.
|
| 321 |
+
* `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) (https://arxiv.org/abs/1908.03265)
|
| 322 |
+
* `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) (https://arxiv.org/abs/1905.11286)
|
| 323 |
+
* `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) (https://arxiv.org/abs/1907.08610)
|
| 324 |
+
* `fused<name>` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed
|
| 325 |
+
* `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) (https://arxiv.org/abs/2006.08217)
|
| 326 |
+
* `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) (https://arxiv.org/abs/1804.04235)
|
| 327 |
+
* `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) (https://arxiv.org/abs/2006.00719)
|
| 328 |
+
* Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) (https://arxiv.org/abs/1708.04896)
|
| 329 |
+
* Mixup (https://arxiv.org/abs/1710.09412)
|
| 330 |
+
* CutMix (https://arxiv.org/abs/1905.04899)
|
| 331 |
+
* AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py)
|
| 332 |
+
* AugMix w/ JSD loss (https://arxiv.org/abs/1912.02781), JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well
|
| 333 |
+
* SplitBatchNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data
|
| 334 |
+
* DropPath aka "Stochastic Depth" (https://arxiv.org/abs/1603.09382)
|
| 335 |
+
* DropBlock (https://arxiv.org/abs/1810.12890)
|
| 336 |
+
* Blur Pooling (https://arxiv.org/abs/1904.11486)
|
| 337 |
+
* Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper?
|
| 338 |
+
* Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets)
|
| 339 |
+
* An extensive selection of channel and/or spatial attention modules:
|
| 340 |
+
* Bottleneck Transformer - https://arxiv.org/abs/2101.11605
|
| 341 |
+
* CBAM - https://arxiv.org/abs/1807.06521
|
| 342 |
+
* Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667
|
| 343 |
+
* Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151
|
| 344 |
+
* Gather-Excite (GE) - https://arxiv.org/abs/1810.12348
|
| 345 |
+
* Global Context (GC) - https://arxiv.org/abs/1904.11492
|
| 346 |
+
* Halo - https://arxiv.org/abs/2103.12731
|
| 347 |
+
* Involution - https://arxiv.org/abs/2103.06255
|
| 348 |
+
* Lambda Layer - https://arxiv.org/abs/2102.08602
|
| 349 |
+
* Non-Local (NL) - https://arxiv.org/abs/1711.07971
|
| 350 |
+
* Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507
|
| 351 |
+
  * Selective Kernel (SK) - https://arxiv.org/abs/1903.06586
|
| 352 |
+
* Split (SPLAT) - https://arxiv.org/abs/2004.08955
|
| 353 |
+
* Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030
|
| 354 |
+
|
| 355 |
+
## Results
|
| 356 |
+
|
| 357 |
+
Model validation results can be found in the [documentation](https://rwightman.github.io/pytorch-image-models/results/) and in the [results tables](results/README.md)
|
| 358 |
+
|
| 359 |
+
## Getting Started (Documentation)
|
| 360 |
+
|
| 361 |
+
My current [documentation](https://rwightman.github.io/pytorch-image-models/) for `timm` covers the basics.
|
| 362 |
+
|
| 363 |
+
[timmdocs](https://fastai.github.io/timmdocs/) is quickly becoming a much more comprehensive set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs.
|
| 364 |
+
|
| 365 |
+
[paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`.
|
| 366 |
+
|
| 367 |
+
## Train, Validation, Inference Scripts
|
| 368 |
+
|
| 369 |
+
The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://rwightman.github.io/pytorch-image-models/scripts/) for some basics and [training hparams](https://rwightman.github.io/pytorch-image-models/training_hparam_examples) for some train examples that produce SOTA ImageNet results.
|
| 370 |
+
|
| 371 |
+
## Awesome PyTorch Resources
|
| 372 |
+
|
| 373 |
+
One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below.
|
| 374 |
+
|
| 375 |
+
### Object Detection, Instance and Semantic Segmentation
|
| 376 |
+
* Detectron2 - https://github.com/facebookresearch/detectron2
|
| 377 |
+
* Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch
|
| 378 |
+
* EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch
|
| 379 |
+
|
| 380 |
+
### Computer Vision / Image Augmentation
|
| 381 |
+
* Albumentations - https://github.com/albumentations-team/albumentations
|
| 382 |
+
* Kornia - https://github.com/kornia/kornia
|
| 383 |
+
|
| 384 |
+
### Knowledge Distillation
|
| 385 |
+
* RepDistiller - https://github.com/HobbitLong/RepDistiller
|
| 386 |
+
* torchdistill - https://github.com/yoshitomo-matsubara/torchdistill
|
| 387 |
+
|
| 388 |
+
### Metric Learning
|
| 389 |
+
* PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning
|
| 390 |
+
|
| 391 |
+
### Training / Frameworks
|
| 392 |
+
* fastai - https://github.com/fastai/fastai
|
| 393 |
+
|
| 394 |
+
## Licenses
|
| 395 |
+
|
| 396 |
+
### Code
|
| 397 |
+
The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue.
|
| 398 |
+
|
| 399 |
+
### Pretrained Weights
|
| 400 |
+
So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product.
|
| 401 |
+
|
| 402 |
+
#### Pretrained on more than ImageNet
|
| 403 |
+
Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions.
|
| 404 |
+
|
| 405 |
+
## Citing

### BibTeX

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```

### Latest DOI

[![DOI](https://zenodo.org/badge/168799526.svg)](https://zenodo.org/badge/latestdoi/168799526)
|
testbed/huggingface__pytorch-image-models/avg_checkpoints.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
""" Checkpoint Averaging Script
|
| 3 |
+
|
| 4 |
+
This script averages all model weights for checkpoints in specified path that match
|
| 5 |
+
the specified filter wildcard. All checkpoints must be from the exact same model.
|
| 6 |
+
|
| 7 |
+
For any hope of decent results, the checkpoints should be from the same or child
|
| 8 |
+
(via resumes) training session. This can be viewed as similar to maintaining running
|
| 9 |
+
EMA (exponential moving average) of the model weights or performing SWA (stochastic
|
| 10 |
+
weight averaging), but post-training.
|
| 11 |
+
|
| 12 |
+
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
|
| 13 |
+
"""
|
| 14 |
+
import torch
|
| 15 |
+
import argparse
|
| 16 |
+
import os
|
| 17 |
+
import glob
|
| 18 |
+
import hashlib
|
| 19 |
+
from timm.models.helpers import load_state_dict
|
| 20 |
+
|
| 21 |
+
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
|
| 22 |
+
parser.add_argument('--input', default='', type=str, metavar='PATH',
|
| 23 |
+
help='path to base input folder containing checkpoints')
|
| 24 |
+
parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
|
| 25 |
+
help='checkpoint filter (path wildcard)')
|
| 26 |
+
parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH',
|
| 27 |
+
help='output filename')
|
| 28 |
+
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
|
| 29 |
+
help='Force not using ema version of weights (if present)')
|
| 30 |
+
parser.add_argument('--no-sort', dest='no_sort', action='store_true',
|
| 31 |
+
help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
|
| 32 |
+
parser.add_argument('-n', type=int, default=10, metavar='N',
|
| 33 |
+
help='Number of checkpoints to average')
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def checkpoint_metric(checkpoint_path):
    """Return the 'metric' value stored in a checkpoint file, or None.

    Returns None (instead of an empty dict, as before) when the path is
    empty/missing or the checkpoint has no 'metric' key, so the caller's
    `is not None` filter works and the (metric, path) list stays sortable —
    a stray `{}` in that list made `sorted()` raise a TypeError.
    """
    if not checkpoint_path or not os.path.isfile(checkpoint_path):
        return None
    print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
    # map_location='cpu' so GPU-trained checkpoints load on any host
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    metric = None
    if isinstance(checkpoint, dict) and 'metric' in checkpoint:
        metric = checkpoint['metric']
    return metric
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def main():
    """Average matching checkpoints into a single state_dict file.

    Globs checkpoints under --input matching --filter, optionally sorts them
    by their stored 'metric' and keeps the top -n, then writes the
    element-wise mean of their (optionally EMA) weights to --output along
    with a SHA256 digest of the saved file.
    """
    args = parser.parse_args()
    # by default use the EMA weights (if present)
    args.use_ema = not args.no_use_ema
    # by default sort by checkpoint metric (if present) and avg top n checkpoints
    args.sort = not args.no_sort

    if os.path.exists(args.output):
        print("Error: Output filename ({}) already exists.".format(args.output))
        exit(1)

    # Join the input folder and filter wildcard into one glob pattern.
    pattern = args.input
    if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
        pattern += os.path.sep
    pattern += args.filter
    checkpoints = glob.glob(pattern, recursive=True)

    if args.sort:
        checkpoint_metrics = []
        for c in checkpoints:
            metric = checkpoint_metric(c)
            if metric is not None:
                checkpoint_metrics.append((metric, c))
        # ascending sort, keep the n best (highest-metric) checkpoints
        checkpoint_metrics = list(sorted(checkpoint_metrics))
        checkpoint_metrics = checkpoint_metrics[-args.n:]
        print("Selected checkpoints:")
        for m, c in checkpoint_metrics:
            print(m, c)
        avg_checkpoints = [c for m, c in checkpoint_metrics]
    else:
        avg_checkpoints = checkpoints
        print("Selected checkpoints:")
        for c in checkpoints:
            print(c)

    # Accumulate sums in float64 to limit rounding error across many checkpoints.
    avg_state_dict = {}
    avg_counts = {}
    for c in avg_checkpoints:
        new_state_dict = load_state_dict(c, args.use_ema)
        if not new_state_dict:
            # was `args.checkpoint` — an attribute the parser never defines;
            # report the checkpoint path actually being processed
            print("Error: Checkpoint ({}) doesn't exist".format(c))
            continue

        for k, v in new_state_dict.items():
            if k not in avg_state_dict:
                avg_state_dict[k] = v.clone().to(dtype=torch.float64)
                avg_counts[k] = 1
            else:
                avg_state_dict[k] += v.to(dtype=torch.float64)
                avg_counts[k] += 1

    for k, v in avg_state_dict.items():
        v.div_(avg_counts[k])

    # float32 overflow seems unlikely based on weights seen to date, but who knows
    float32_info = torch.finfo(torch.float32)
    final_state_dict = {}
    for k, v in avg_state_dict.items():
        v = v.clamp(float32_info.min, float32_info.max)
        final_state_dict[k] = v.to(dtype=torch.float32)

    try:
        # Prefer the legacy serialization format for wider compatibility;
        # older torch builds don't accept this kwarg, hence the TypeError
        # fallback (was a bare except:, which also swallowed real I/O errors).
        torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False)
    except TypeError:
        torch.save(final_state_dict, args.output)

    with open(args.output, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    print("=> Saved state_dict to '{}, SHA256: {}'".format(args.output, sha_hash))
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
if __name__ == '__main__':
|
| 117 |
+
main()
|
testbed/huggingface__pytorch-image-models/benchmark.py
ADDED
|
@@ -0,0 +1,481 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
""" Model Benchmark Script
|
| 3 |
+
|
| 4 |
+
An inference and train step benchmark script for timm models.
|
| 5 |
+
|
| 6 |
+
Hacked together by Ross Wightman (https://github.com/rwightman)
|
| 7 |
+
"""
|
| 8 |
+
import argparse
|
| 9 |
+
import os
|
| 10 |
+
import csv
|
| 11 |
+
import json
|
| 12 |
+
import time
|
| 13 |
+
import logging
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
import torch.nn.parallel
|
| 17 |
+
from collections import OrderedDict
|
| 18 |
+
from contextlib import suppress
|
| 19 |
+
from functools import partial
|
| 20 |
+
|
| 21 |
+
from timm.models import create_model, is_model, list_models
|
| 22 |
+
from timm.optim import create_optimizer_v2
|
| 23 |
+
from timm.data import resolve_data_config
|
| 24 |
+
from timm.utils import AverageMeter, setup_default_logging
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
has_apex = False
|
| 28 |
+
try:
|
| 29 |
+
from apex import amp
|
| 30 |
+
has_apex = True
|
| 31 |
+
except ImportError:
|
| 32 |
+
pass
|
| 33 |
+
|
| 34 |
+
has_native_amp = False
|
| 35 |
+
try:
|
| 36 |
+
if getattr(torch.cuda.amp, 'autocast') is not None:
|
| 37 |
+
has_native_amp = True
|
| 38 |
+
except AttributeError:
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
torch.backends.cudnn.benchmark = True
|
| 42 |
+
_logger = logging.getLogger('validate')
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
parser = argparse.ArgumentParser(description='PyTorch Benchmark')
|
| 46 |
+
|
| 47 |
+
# benchmark specific args
|
| 48 |
+
parser.add_argument('--model-list', metavar='NAME', default='',
|
| 49 |
+
help='txt file based list of model names to benchmark')
|
| 50 |
+
parser.add_argument('--bench', default='both', type=str,
|
| 51 |
+
help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
|
| 52 |
+
parser.add_argument('--detail', action='store_true', default=False,
|
| 53 |
+
help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
|
| 54 |
+
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
|
| 55 |
+
help='Output csv file for validation results (summary)')
|
| 56 |
+
parser.add_argument('--num-warm-iter', default=10, type=int,
|
| 57 |
+
metavar='N', help='Number of warmup iterations (default: 10)')
|
| 58 |
+
parser.add_argument('--num-bench-iter', default=40, type=int,
|
| 59 |
+
metavar='N', help='Number of benchmark iterations (default: 40)')
|
| 60 |
+
|
| 61 |
+
# common inference / train args
|
| 62 |
+
parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
|
| 63 |
+
help='model architecture (default: resnet50)')
|
| 64 |
+
parser.add_argument('-b', '--batch-size', default=256, type=int,
|
| 65 |
+
metavar='N', help='mini-batch size (default: 256)')
|
| 66 |
+
parser.add_argument('--img-size', default=None, type=int,
|
| 67 |
+
metavar='N', help='Input image dimension, uses model default if empty')
|
| 68 |
+
parser.add_argument('--input-size', default=None, nargs=3, type=int,
|
| 69 |
+
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
|
| 70 |
+
parser.add_argument('--num-classes', type=int, default=None,
|
| 71 |
+
help='Number classes in dataset')
|
| 72 |
+
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
|
| 73 |
+
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
|
| 74 |
+
parser.add_argument('--channels-last', action='store_true', default=False,
|
| 75 |
+
help='Use channels_last memory layout')
|
| 76 |
+
parser.add_argument('--amp', action='store_true', default=False,
|
| 77 |
+
help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
|
| 78 |
+
parser.add_argument('--precision', default='float32', type=str,
|
| 79 |
+
help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
|
| 80 |
+
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
|
| 81 |
+
help='convert model torchscript for inference')
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# train optimizer parameters
|
| 85 |
+
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
|
| 86 |
+
help='Optimizer (default: "sgd"')
|
| 87 |
+
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
|
| 88 |
+
help='Optimizer Epsilon (default: None, use opt default)')
|
| 89 |
+
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
|
| 90 |
+
help='Optimizer Betas (default: None, use opt default)')
|
| 91 |
+
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
|
| 92 |
+
help='Optimizer momentum (default: 0.9)')
|
| 93 |
+
parser.add_argument('--weight-decay', type=float, default=0.0001,
|
| 94 |
+
help='weight decay (default: 0.0001)')
|
| 95 |
+
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
|
| 96 |
+
help='Clip gradient norm (default: None, no clipping)')
|
| 97 |
+
parser.add_argument('--clip-mode', type=str, default='norm',
|
| 98 |
+
help='Gradient clipping mode. One of ("norm", "value", "agc")')
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
# model regularization / loss params that impact model or loss fn
|
| 102 |
+
parser.add_argument('--smoothing', type=float, default=0.1,
|
| 103 |
+
help='Label smoothing (default: 0.1)')
|
| 104 |
+
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
|
| 105 |
+
help='Dropout rate (default: 0.)')
|
| 106 |
+
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
|
| 107 |
+
help='Drop path rate (default: None)')
|
| 108 |
+
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
|
| 109 |
+
help='Drop block rate (default: None)')
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def timestamp(sync=False):
    """High-resolution wall-clock timestamp for CPU timing.

    The `sync` flag is accepted only for interface parity with
    cuda_timestamp(); there is nothing to synchronize on the CPU path.
    """
    return time.perf_counter()
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def cuda_timestamp(sync=False, device=None):
    """Wall-clock timestamp, optionally synchronizing the CUDA device first.

    With sync=True, pending GPU work is flushed so the timestamp reflects
    completed kernels rather than queued launches.
    """
    if sync:
        torch.cuda.synchronize(device=device)
    return time.perf_counter()
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def count_params(model: nn.Module) -> int:
    """Return the total number of parameter elements in `model`.

    Uses a generator expression (the previous list comprehension
    materialized every numel just to sum it).
    """
    return sum(p.numel() for p in model.parameters())
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def resolve_precision(precision: str):
    """Map a precision name to (use_amp, model_dtype, data_dtype).

    'amp' keeps float32 weights/data and enables autocast; the explicit
    dtype names run the model and data directly in that dtype.
    """
    assert precision in ('amp', 'float16', 'bfloat16', 'float32')
    if precision == 'amp':
        return True, torch.float32, torch.float32
    dtype = {
        'float16': torch.float16,
        'bfloat16': torch.bfloat16,
        'float32': torch.float32,
    }[precision]
    return False, dtype, dtype
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class BenchmarkRunner:
    """Shared benchmark setup: builds the model, resolves precision/layout,
    and prepares the timing function and example input batch."""

    def __init__(
            self, model_name, detail=False, device='cuda', torchscript=False, precision='float32',
            num_warm_iter=10, num_bench_iter=50, **kwargs):
        self.model_name = model_name
        self.detail = detail
        self.device = device
        self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision)
        self.channels_last = kwargs.pop('channels_last', False)
        # autocast context under AMP, otherwise a no-op context manager
        self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress

        self.model = create_model(
            model_name,
            num_classes=kwargs.pop('num_classes', None),
            in_chans=3,
            global_pool=kwargs.pop('gp', 'fast'),
            scriptable=torchscript)
        self.model.to(
            device=self.device,
            dtype=self.model_dtype,
            memory_format=torch.channels_last if self.channels_last else None)
        self.num_classes = self.model.num_classes
        self.param_count = count_params(self.model)
        _logger.info('Model %s created, param count: %d' % (model_name, self.param_count))
        if torchscript:
            self.model = torch.jit.script(self.model)

        # remaining kwargs (img_size / input_size / etc.) feed the data config
        data_config = resolve_data_config(kwargs, model=self.model, use_test_size=True)
        self.input_size = data_config['input_size']
        self.batch_size = kwargs.pop('batch_size', 256)

        self.example_inputs = None
        self.num_warm_iter = num_warm_iter
        self.num_bench_iter = num_bench_iter
        self.log_freq = num_bench_iter // 5
        # CUDA timing must synchronize the device before reading the clock
        if 'cuda' in self.device:
            self.time_fn = partial(cuda_timestamp, device=self.device)
        else:
            self.time_fn = timestamp

    def _init_input(self):
        """Create a random example batch matching the resolved input config."""
        input_shape = (self.batch_size,) + self.input_size
        self.example_inputs = torch.randn(input_shape, device=self.device, dtype=self.data_dtype)
        if self.channels_last:
            self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class InferenceBenchmarkRunner(BenchmarkRunner):
    """Times forward passes of a model in eval mode under torch.no_grad()."""

    def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
        super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
        self.model.eval()

    def run(self):
        """Run warmup then timed forward passes; return a summary dict."""
        def _step():
            t_begin = self.time_fn()
            with self.amp_autocast():
                self.model(self.example_inputs)
            t_finish = self.time_fn(True)  # sync before reading the clock
            return t_finish - t_begin

        _logger.info(
            f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
            f'input size {self.input_size} and batch size {self.batch_size}.')

        with torch.no_grad():
            self._init_input()

            # warmup iterations are not timed
            for _ in range(self.num_warm_iter):
                _step()

            total_step = 0.
            num_samples = 0
            t_run_start = self.time_fn()
            for i in range(self.num_bench_iter):
                total_step += _step()
                num_samples += self.batch_size
                num_steps = i + 1
                if num_steps % self.log_freq == 0:
                    _logger.info(
                        f"Infer [{num_steps}/{self.num_bench_iter}]."
                        f" {num_samples / total_step:0.2f} samples/sec."
                        f" {1000 * total_step / num_steps:0.3f} ms/step.")
            t_run_elapsed = self.time_fn(True) - t_run_start

        results = dict(
            samples_per_sec=round(num_samples / t_run_elapsed, 2),
            step_time=round(1000 * total_step / self.num_bench_iter, 3),
            batch_size=self.batch_size,
            img_size=self.input_size[-1],
            param_count=round(self.param_count / 1e6, 2),
        )

        _logger.info(
            f"Inference benchmark of {self.model_name} done. "
            f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")

        return results
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class TrainBenchmarkRunner(BenchmarkRunner):
    """Times train steps (forward + backward + optimizer) for a model."""

    def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
        super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
        self.model.train()

        # NOTE(review): the previous code branched on `smoothing` but built
        # the exact same plain CrossEntropyLoss in both branches, so the
        # smoothing value was popped and never applied. Collapsed the dead
        # branch; keep the pop for kwargs hygiene. Wire in a smoothing-aware
        # loss (e.g. timm.loss.LabelSmoothingCrossEntropy) if smoothed
        # training benchmarks are actually wanted.
        kwargs.pop('smoothing', 0)
        self.loss = nn.CrossEntropyLoss().to(self.device)
        # scalar class-index target per sample (shape (batch,))
        self.target_shape = tuple()

        self.optimizer = create_optimizer_v2(
            self.model,
            opt=kwargs.pop('opt', 'sgd'),
            lr=kwargs.pop('lr', 1e-4))

    def _gen_target(self, batch_size):
        """Random integer class targets in [0, num_classes)."""
        return torch.empty(
            (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)

    def run(self):
        """Run warmup then timed train steps; return a summary dict.

        With self.detail, each step is split into fwd/bwd/opt timings
        (extra device syncs make the total slightly slower).
        """
        def _step(detail=False):
            self.optimizer.zero_grad()  # can this be ignored?
            t_start = self.time_fn()
            t_fwd_end = t_start
            t_bwd_end = t_start
            with self.amp_autocast():
                output = self.model(self.example_inputs)
                if isinstance(output, tuple):
                    output = output[0]
                if detail:
                    t_fwd_end = self.time_fn(True)
                target = self._gen_target(output.shape[0])
                self.loss(output, target).backward()
                if detail:
                    t_bwd_end = self.time_fn(True)
            self.optimizer.step()
            t_end = self.time_fn(True)
            if detail:
                delta_fwd = t_fwd_end - t_start
                delta_bwd = t_bwd_end - t_fwd_end
                delta_opt = t_end - t_bwd_end
                return delta_fwd, delta_bwd, delta_opt
            else:
                delta_step = t_end - t_start
                return delta_step

        _logger.info(
            f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
            f'input size {self.input_size} and batch size {self.batch_size}.')

        self._init_input()

        # warmup iterations are not timed
        for _ in range(self.num_warm_iter):
            _step()

        t_run_start = self.time_fn()
        if self.detail:
            total_fwd = 0.
            total_bwd = 0.
            total_opt = 0.
            num_samples = 0
            for i in range(self.num_bench_iter):
                delta_fwd, delta_bwd, delta_opt = _step(True)
                num_samples += self.batch_size
                total_fwd += delta_fwd
                total_bwd += delta_bwd
                total_opt += delta_opt
                num_steps = (i + 1)
                if num_steps % self.log_freq == 0:
                    total_step = total_fwd + total_bwd + total_opt
                    _logger.info(
                        f"Train [{num_steps}/{self.num_bench_iter}]."
                        f" {num_samples / total_step:0.2f} samples/sec."
                        f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd,"
                        f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd,"
                        f" {1000 * total_opt / num_steps:0.3f} ms/step opt."
                    )
            total_step = total_fwd + total_bwd + total_opt
            t_run_elapsed = self.time_fn() - t_run_start
            results = dict(
                samples_per_sec=round(num_samples / t_run_elapsed, 2),
                step_time=round(1000 * total_step / self.num_bench_iter, 3),
                fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
                bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
                opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
                batch_size=self.batch_size,
                img_size=self.input_size[-1],
                param_count=round(self.param_count / 1e6, 2),
            )
        else:
            total_step = 0.
            num_samples = 0
            for i in range(self.num_bench_iter):
                delta_step = _step(False)
                num_samples += self.batch_size
                total_step += delta_step
                num_steps = (i + 1)
                if num_steps % self.log_freq == 0:
                    _logger.info(
                        f"Train [{num_steps}/{self.num_bench_iter}]."
                        f" {num_samples / total_step:0.2f} samples/sec."
                        f" {1000 * total_step / num_steps:0.3f} ms/step.")
            t_run_elapsed = self.time_fn() - t_run_start
            results = dict(
                samples_per_sec=round(num_samples / t_run_elapsed, 2),
                step_time=round(1000 * total_step / self.num_bench_iter, 3),
                batch_size=self.batch_size,
                img_size=self.input_size[-1],
                param_count=round(self.param_count / 1e6, 2),
            )

        _logger.info(
            f"Train benchmark of {self.model_name} done. "
            f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample")

        return results
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def decay_batch_exp(batch_size, factor=0.5, divisor=16):
    """Shrink a batch size for OOM retry.

    Scales by `factor` and snaps up to a multiple of `divisor` while the
    scaled size is still above `divisor`; below that, just decrements by
    one. Never returns a negative value.
    """
    scaled = batch_size * factor
    if scaled <= divisor:
        return max(0, batch_size - 1)
    return max(0, int((scaled + 1) // divisor * divisor))
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs):
    """Run `bench_fn`, retrying with smaller batches on RuntimeError.

    Typically recovers from CUDA OOM. Returns the first successful run's
    results dict, or an empty dict if no batch size down to 1 succeeded.
    """
    batch_size = initial_batch_size
    results = dict()
    while batch_size >= 1:
        torch.cuda.empty_cache()
        try:
            bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
            results = bench.run()
            return results
        except RuntimeError as e:
            # Decay first so the message reports the size actually retried —
            # previously it printed the batch size that had just failed.
            batch_size = decay_batch_exp(batch_size)
            print(f'Error: {str(e)} while running benchmark. Reducing batch size to {batch_size} for retry.')
    return results
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def benchmark(args):
    """Benchmark one model per `args`; return an ordered results dict.

    Runs inference and/or train benchmarks depending on args.bench and
    merges their results under 'infer_'/'train_' key prefixes.
    """
    if args.amp:
        _logger.warning("Overriding precision to 'amp' since --amp flag set.")
        args.precision = 'amp'
    _logger.info(f'Benchmarking in {args.precision} precision. '
                 f'{"NHWC" if args.channels_last else "NCHW"} layout. '
                 f'torchscript {"enabled" if args.torchscript else "disabled"}')

    # Everything left in bench_kwargs is forwarded to the runner __init__.
    bench_kwargs = vars(args).copy()
    bench_kwargs.pop('amp')
    model = bench_kwargs.pop('model')
    batch_size = bench_kwargs.pop('batch_size')

    bench_fns = (InferenceBenchmarkRunner,)
    prefixes = ('infer',)
    if args.bench == 'both':
        bench_fns = (
            InferenceBenchmarkRunner,
            TrainBenchmarkRunner
        )
        prefixes = ('infer', 'train')
    elif args.bench == 'train':
        bench_fns = (TrainBenchmarkRunner,)
        prefixes = ('train',)

    model_results = OrderedDict(model=model)
    for prefix, bench_fn in zip(prefixes, bench_fns):
        run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs)
        if prefix:
            run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
        model_results.update(run_results)

    # Collapse the per-prefix param counts into a single 'param_count' key.
    # The previous nested-pop form evaluated its default argument eagerly,
    # popping 'train_param_count' even when 'infer_param_count' existed,
    # and then popped 'train_param_count' a redundant second time.
    param_count = model_results.pop('infer_param_count', None)
    if param_count is None:
        param_count = model_results.pop('train_param_count', 0)
    else:
        model_results.pop('train_param_count', None)
    model_results.setdefault('param_count', param_count)
    return model_results
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def main():
    """CLI entry point: resolve the model set, benchmark each, report results.

    A single existing model name benchmarks directly; a wildcard, 'all', or
    a --model-list file triggers a bulk run with CSV + JSON output.
    """
    setup_default_logging()
    args = parser.parse_args()
    model_cfgs = []
    model_names = []

    if args.model_list:
        # explicit list of model names, one per line
        args.model = ''
        with open(args.model_list) as f:
            model_names = [line.rstrip() for line in f]
        model_cfgs = [(n, None) for n in model_names]
    elif args.model == 'all':
        # validate all models in a list of names with pretrained checkpoints
        args.pretrained = True
        model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
        model_cfgs = [(n, None) for n in model_names]
    elif not is_model(args.model):
        # model name doesn't exist, try as wildcard filter
        model_names = list_models(args.model)
        model_cfgs = [(n, None) for n in model_names]

    if len(model_cfgs):
        results_file = args.results_file or './benchmark.csv'
        _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
        results = []
        try:
            for m, _ in model_cfgs:
                if not m:
                    continue
                args.model = m
                results.append(benchmark(args))
        except KeyboardInterrupt:
            # allow partial results to be sorted and saved on interrupt
            pass
        sort_key = 'train_samples_per_sec' if 'train' in args.bench else 'infer_samples_per_sec'
        # .get guards against a model whose benchmark failed (empty/partial
        # result dict) crashing the whole bulk run with a KeyError
        results = sorted(results, key=lambda x: x.get(sort_key, 0), reverse=True)
        if len(results):
            write_results(results_file, results)

        # json is already imported at module level; the redundant local
        # `import json` that shadowed it has been removed
        print(json.dumps(results, indent=4))
    else:
        benchmark(args)
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def write_results(results_file, results):
    """Write a list of result dicts to `results_file` as CSV.

    Column order follows the keys of the first result dict.
    """
    fieldnames = list(results[0].keys())
    with open(results_file, mode='w') as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(results)
        out.flush()
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
if __name__ == '__main__':
|
| 481 |
+
main()
|
testbed/huggingface__pytorch-image-models/clean_checkpoint.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
""" Checkpoint Cleaning Script
|
| 3 |
+
|
| 4 |
+
Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
|
| 5 |
+
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
|
| 6 |
+
calculation for model zoo compatibility.
|
| 7 |
+
|
| 8 |
+
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
|
| 9 |
+
"""
|
| 10 |
+
import torch
|
| 11 |
+
import argparse
|
| 12 |
+
import os
|
| 13 |
+
import hashlib
|
| 14 |
+
import shutil
|
| 15 |
+
from collections import OrderedDict
|
| 16 |
+
|
| 17 |
+
# CLI definition: source checkpoint, output path, and cleaning options.
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='output path')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
                    help='use ema version of weights if present')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
                    help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')

# Temporary file used while computing the SHA256 hash, then renamed to the
# final hash-suffixed filename.
_TEMP_NAME = './_checkpoint.pth'
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def main():
    """Strip a training checkpoint down to a CPU-only state_dict and save it
    with the first 8 hex chars of its SHA256 appended to the filename
    (model-zoo naming convention).
    """
    args = parser.parse_args()

    if os.path.exists(args.output):
        print("Error: Output filename ({}) already exists.".format(args.output))
        raise SystemExit(1)

    # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
    if args.checkpoint and os.path.isfile(args.checkpoint):
        print("=> Loading checkpoint '{}'".format(args.checkpoint))
        checkpoint = torch.load(args.checkpoint, map_location='cpu')

        new_state_dict = OrderedDict()
        if isinstance(checkpoint, dict):
            # Training checkpoints nest the weights under 'state_dict' (or
            # 'state_dict_ema' for EMA weights); bare weight files are the
            # state dict itself.
            state_dict_key = 'state_dict_ema' if args.use_ema else 'state_dict'
            state_dict = checkpoint.get(state_dict_key, checkpoint)
        else:
            # Explicit error instead of `assert False` (asserts vanish under -O).
            raise SystemExit("Error: Checkpoint ({}) is not a dict".format(args.checkpoint))
        for k, v in state_dict.items():
            if args.clean_aux_bn and 'aux_bn' in k:
                # If all aux_bn keys are removed, the SplitBN layers will end up as normal and
                # load with the unmodified model using BatchNorm2d.
                continue
            # Strip the 'module.' prefix added by (Distributed)DataParallel wrappers.
            name = k[7:] if k.startswith('module') else k
            new_state_dict[name] = v
        print("=> Loaded state_dict from '{}'".format(args.checkpoint))

        try:
            # Legacy (non-zipfile) serialization keeps the output loadable by
            # older torch versions.
            torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False)
        except TypeError:
            # Older torch doesn't accept the keyword at all; fall back to default.
            torch.save(new_state_dict, _TEMP_NAME)

        with open(_TEMP_NAME, 'rb') as f:
            sha_hash = hashlib.sha256(f.read()).hexdigest()

        if args.output:
            checkpoint_root, checkpoint_base = os.path.split(args.output)
            checkpoint_base = os.path.splitext(checkpoint_base)[0]
        else:
            checkpoint_root = ''
            checkpoint_base = os.path.splitext(args.checkpoint)[0]
        # Append first 8 hex chars of the SHA256 so the weights hash is visible
        # in the filename.
        final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth'
        shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename))
        print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash))
    else:
        print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint))
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Script entry point.
if __name__ == '__main__':
    main()
|
testbed/huggingface__pytorch-image-models/convert/convert_nest_flax.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Convert weights from https://github.com/google-research/nested-transformer
|
| 3 |
+
NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from clu import checkpoint
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Per-level transformer encoder depths for each supported NesT variant;
# used by convert_nest() to map Flax's globally-numbered blocks onto
# timm's per-level numbering.
arch_depths = {
    'nest_base': [2, 2, 20],
    'nest_small': [2, 2, 20],
    'nest_tiny': [2, 2, 8],
}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def convert_nest(checkpoint_path, arch):
    """
    Convert a Flax NesT checkpoint into a PyTorch state dict for timm's Nest model.

    Expects path to checkpoint which is a dir containing 4 files like in each of these folders
    - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints
    `arch` is needed to look up the per-level transformer depths (see `arch_depths`).
    Returns a state dict that can be used with `torch.nn.Module.load_state_dict`
    Hint: Follow timm.models.nest.Nest.__init__ and
    https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py
    """
    assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported"

    # The Flax checkpoint stores model params under optimizer/target.
    flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
    state_dict = {}

    # Patch embedding
    # permute(3, 2, 0, 1): reorder the 4-d Flax conv kernel (presumably
    # H, W, in, out) into PyTorch's (out, in, H, W) layout.
    state_dict['patch_embed.proj.weight'] = torch.tensor(
        flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
    state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])

    # Positional embeddings: one PositionEmbedding_* entry per hierarchy level.
    posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
    for i, k in enumerate(posemb_keys):
        state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])

    # Transformer encoders. Flax numbers EncoderNDBlock_* globally across all
    # levels; timm numbers blocks per level, hence global_layer_ix below.
    depths = arch_depths[arch]
    for level in range(len(depths)):
        for layer in range(depths[level]):
            global_layer_ix = sum(depths[:level]) + layer
            # Norms (Flax LayerNorm 'scale' maps to PyTorch 'weight')
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
                state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])
            # Attention qkv: Flax keeps q separate (DenseGeneral_0) from the
            # fused kv (DenseGeneral_1); fuse all three into one qkv matrix.
            w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
            w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
            # Pay attention to dims here (maybe get pen and paper)
            w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
            w_qkv = np.concatenate([w_q, w_kv], 1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0)
            b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
            b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
            # Pay attention to dims here (maybe get pen and paper)
            b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
            b_qkv = np.concatenate([b_q, b_kv], 0)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)
            # Attention proj: flatten the per-head kernel into a 2-d linear weight.
            w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
            w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
            state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(
                flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])
            # MLP: Flax Dense kernels transpose to PyTorch Linear weights via permute(1, 0).
            for i in range(2):
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
                state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor(
                    flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])

    # Block aggregations (ConvPool) between levels; note ConvPool_{level-1}
    # feeds levels.{level} (there is no pool before the first level).
    for level in range(1, len(depths)):
        # Convs
        state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
        state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias'])
        # Norms
        state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale'])
        state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(
            flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias'])

    # Final norm
    state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
    state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])

    # Classifier
    state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
    state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])

    return state_dict
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
if __name__ == '__main__':
    variant = sys.argv[1]  # base, small, or tiny
    # Checkpoint dirs are named by the variant's first letter, e.g. ./nest-b_imagenet.
    state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
    torch.save(state_dict, f'./jx_nest_{variant}.pth')
|
testbed/huggingface__pytorch-image-models/distributed_train.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Launch train.py across NUM_PROC processes on a single node via
# torch.distributed.launch.
# Usage: ./distributed_train.sh NUM_PROC [train.py args...]
NUM_PROC=$1
shift
# Quote the expansion so an empty/whitespace value fails loudly instead of
# silently dropping the flag.
python3 -m torch.distributed.launch --nproc_per_node="$NUM_PROC" train.py "$@"
|
| 5 |
+
|
testbed/huggingface__pytorch-image-models/docs/archived_changes.md
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Archived Changes
|
| 2 |
+
|
| 3 |
+
### Dec 18, 2020
|
| 4 |
+
* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256
|
| 5 |
+
* 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25)
|
| 6 |
+
* 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76)
|
| 7 |
+
* 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01)
|
| 8 |
+
|
| 9 |
+
### Dec 7, 2020
|
| 10 |
+
* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models
|
| 11 |
+
* Misc fixes for SiLU ONNX export, default_cfg missing from Feature extraction models, Linear layer w/ AMP + torchscript
|
| 12 |
+
* PyPi release @ 0.3.2 (needed by EfficientDet)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
### Oct 30, 2020
|
| 16 |
+
* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue.
|
| 17 |
+
* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16.
|
| 18 |
+
* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated.
|
| 19 |
+
* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage.
|
| 20 |
+
* PyPi release @ 0.3.0 version!
|
| 21 |
+
|
| 22 |
+
### Oct 26, 2020
|
| 23 |
+
* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer
|
| 24 |
+
* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl
|
| 25 |
+
* ViT-B/16 - 84.2
|
| 26 |
+
* ViT-B/32 - 81.7
|
| 27 |
+
* ViT-L/16 - 85.2
|
| 28 |
+
* ViT-L/32 - 81.5
|
| 29 |
+
|
| 30 |
+
### Oct 21, 2020
|
| 31 |
+
* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs.
|
| 32 |
+
|
| 33 |
+
### Oct 13, 2020
|
| 34 |
+
* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train...
|
| 35 |
+
* Adafactor and AdaHessian (FP32 only, no AMP) optimizers
|
| 36 |
+
* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1
|
| 37 |
+
* Pip release, doc updates pending a few more changes...
|
| 38 |
+
|
| 39 |
+
### Sept 18, 2020
|
| 40 |
+
* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D
|
| 41 |
+
* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D)
|
| 42 |
+
|
| 43 |
+
### Sept 3, 2020
|
| 44 |
+
* New weights
|
| 45 |
+
* Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision)
|
| 46 |
+
* SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene)
|
| 47 |
+
* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`)
|
| 48 |
+
* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel.
|
| 49 |
+
|
| 50 |
+
### Aug 12, 2020
|
| 51 |
+
* New/updated weights from training experiments
|
| 52 |
+
* EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp)
|
| 53 |
+
* RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver)
|
| 54 |
+
* CSPResNet50 - 79.6 top-1 (76.6 from official ver)
|
| 55 |
+
* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples
|
| 56 |
+
* Some fixes for using pretrained weights with `in_chans` != 3 on several models.
|
| 57 |
+
|
| 58 |
+
### Aug 5, 2020
|
| 59 |
+
Universal feature extraction, new models, new weights, new test sets.
|
| 60 |
+
* All models support the `features_only=True` argument for `create_model` call to return a network that extracts feature maps from the deepest layer at each stride.
|
| 61 |
+
* New models
|
| 62 |
+
* CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet
|
| 63 |
+
* ReXNet
|
| 64 |
+
* (Modified Aligned) Xception41/65/71 (a proper port of TF models)
|
| 65 |
+
* New trained weights
|
| 66 |
+
* SEResNet50 - 80.3 top-1
|
| 67 |
+
* CSPDarkNet53 - 80.1 top-1
|
| 68 |
+
* CSPResNeXt50 - 80.0 top-1
|
| 69 |
+
* DPN68b - 79.2 top-1
|
| 70 |
+
* EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314))
|
| 71 |
+
* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md)
|
| 72 |
+
* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh)
|
| 73 |
+
* Train script and loader/transform tweaks to punch through more aug arguments
|
| 74 |
+
* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/
|
| 75 |
+
* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell)
|
| 76 |
+
|
| 77 |
+
### June 11, 2020
|
| 78 |
+
Bunch of changes:
|
| 79 |
+
* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions
|
| 80 |
+
* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1
|
| 81 |
+
* Activation factory added along with new activations:
|
| 82 |
+
* select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export)
|
| 83 |
+
* hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish
|
| 84 |
+
* context mgr for setting exportable/scriptable/no_jit states
|
| 85 |
+
* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface
|
| 86 |
+
* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this
|
| 87 |
+
* Some import cleanup and classifier reset changes, all models will have classifier reset to nn.Identity on reset_classifer(0) call
|
| 88 |
+
* Prep for 0.1.28 pip release
|
| 89 |
+
|
| 90 |
+
### May 12, 2020
|
| 91 |
+
* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955))
|
| 92 |
+
|
| 93 |
+
### May 3, 2020
|
| 94 |
+
* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo)
|
| 95 |
+
|
| 96 |
+
### May 1, 2020
|
| 97 |
+
* Merged a number of excellent contributions in the ResNet model family over the past month
|
| 98 |
+
* BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme), I trained resnetblur50 to 79.3.
|
| 99 |
+
* TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23)
|
| 100 |
+
* ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo)
|
| 101 |
+
* 200 pretrained models in total now with updated results csv in results folder
|
| 102 |
+
|
| 103 |
+
### April 5, 2020
|
| 104 |
+
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
|
| 105 |
+
* 3.5M param MobileNet-V2 100 @ 73%
|
| 106 |
+
* 4.5M param MobileNet-V2 110d @ 75%
|
| 107 |
+
* 6.1M param MobileNet-V2 140 @ 76.5%
|
| 108 |
+
* 5.8M param MobileNet-V2 120d @ 77.3%
|
| 109 |
+
|
| 110 |
+
### March 18, 2020
|
| 111 |
+
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
|
| 112 |
+
* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
|
| 113 |
+
|
| 114 |
+
### April 5, 2020
|
| 115 |
+
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
|
| 116 |
+
* 3.5M param MobileNet-V2 100 @ 73%
|
| 117 |
+
* 4.5M param MobileNet-V2 110d @ 75%
|
| 118 |
+
* 6.1M param MobileNet-V2 140 @ 76.5%
|
| 119 |
+
* 5.8M param MobileNet-V2 120d @ 77.3%
|
| 120 |
+
|
| 121 |
+
### March 18, 2020
|
| 122 |
+
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
|
| 123 |
+
* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
|
| 124 |
+
|
| 125 |
+
### Feb 29, 2020
|
| 126 |
+
* New MobileNet-V3 Large weights trained from scratch with this code to 75.77% top-1
|
| 127 |
+
* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models
|
| 128 |
+
* overall results similar to a bit better training from scratch on a few smaller models tried
|
| 129 |
+
* performance early in training seems consistently improved but less difference by end
|
| 130 |
+
* set `fix_group_fanout=False` in `_init_weight_goog` fn if you need to reproduce past behaviour
|
| 131 |
+
* Experimental LR noise feature added applies a random perturbation to LR each epoch in specified range of training
|
| 132 |
+
|
| 133 |
+
### Feb 18, 2020
|
| 134 |
+
* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268):
|
| 135 |
+
* Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion
|
| 136 |
+
* ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks
|
| 137 |
+
* Add Selective Kernel Nets on top of ResNet base, pretrained weights
|
| 138 |
+
* skresnet18 - 73% top-1
|
| 139 |
+
* skresnet34 - 76.9% top-1
|
| 140 |
+
* skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1
|
| 141 |
+
* ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme)
|
| 142 |
+
* CBAM attention experiment (not the best results so far, may remove)
|
| 143 |
+
* Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets
|
| 144 |
+
* Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants
|
| 145 |
+
* Full dataset results updated that incl NoisyStudent weights and 2 of the 3 SK weights
|
| 146 |
+
|
| 147 |
+
### Feb 12, 2020
|
| 148 |
+
* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
|
| 149 |
+
|
| 150 |
+
### Feb 6, 2020
|
| 151 |
+
* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
|
| 152 |
+
|
| 153 |
+
### Feb 1/2, 2020
|
| 154 |
+
* Port new EfficientNet-B8 (RandAugment) weights, these are different than the B8 AdvProp, different input normalization.
|
| 155 |
+
* Update results csv files on all models for ImageNet validation and three other test sets
|
| 156 |
+
* Push PyPi package update
|
| 157 |
+
|
| 158 |
+
### Jan 31, 2020
|
| 159 |
+
* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below.
|
| 160 |
+
|
| 161 |
+
### Jan 11/12, 2020
|
| 162 |
+
* Master may be a bit unstable wrt to training, these changes have been tested but not all combos
|
| 163 |
+
* Implementations of AugMix added to existing RA and AA. Including numerous supporting pieces like JSD loss (Jensen-Shannon divergence + CE), and AugMixDataset
|
| 164 |
+
* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper
|
| 165 |
+
* ResNet-50 AugMix trained model w/ 79% top-1 added
|
| 166 |
+
* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd'
|
| 167 |
+
|
| 168 |
+
### Jan 3, 2020
|
| 169 |
+
* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section)
|
| 170 |
+
* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs.
|
| 171 |
+
|
| 172 |
+
### Dec 30, 2019
|
| 173 |
+
* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch
|
| 174 |
+
|
| 175 |
+
### Dec 28, 2019
|
| 176 |
+
* Add new model weights and training hparams (see Training Hparams section)
|
| 177 |
+
* `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct
|
| 178 |
+
* trained with RandAugment, ended up with an interesting but less than perfect result (see training section)
|
| 179 |
+
* `seresnext26d_32x4d`- 77.6 top-1, 93.6 top-5
|
| 180 |
+
* deep stem (32, 32, 64), avgpool downsample
|
| 181 |
+
* stem/downsample from bag-of-tricks paper
|
| 182 |
+
* `seresnext26t_32x4d`- 78.0 top-1, 93.7 top-5
|
| 183 |
+
* deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant)
|
| 184 |
+
* stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments
|
| 185 |
+
|
| 186 |
+
### Dec 23, 2019
|
| 187 |
+
* Add RandAugment trained MixNet-XL weights with 80.48 top-1.
|
| 188 |
+
* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval
|
| 189 |
+
|
| 190 |
+
### Dec 4, 2019
|
| 191 |
+
* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5).
|
| 192 |
+
|
| 193 |
+
### Nov 29, 2019
|
| 194 |
+
* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded.
|
| 195 |
+
* AdvProp weights added
|
| 196 |
+
* Official TF MobileNetv3 weights added
|
| 197 |
+
* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here...
|
| 198 |
+
* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification
|
| 199 |
+
* Consistency in global pooling, `reset_classifer`, and `forward_features` across models
|
| 200 |
+
* `forward_features` always returns unpooled feature maps now
|
| 201 |
+
* Reasonable chance I broke something... let me know
|
| 202 |
+
|
| 203 |
+
### Nov 22, 2019
|
| 204 |
+
* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update.
|
| 205 |
+
* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise.
|
testbed/huggingface__pytorch-image-models/docs/changes.md
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Recent Changes
|
| 2 |
+
|
| 3 |
+
### June 8, 2021
|
| 4 |
+
* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1.
|
| 5 |
+
* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1.
|
| 6 |
+
* NFNet inspired block layout with quad layer stem and no maxpool
|
| 7 |
+
* Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288
|
| 8 |
+
|
| 9 |
+
### May 25, 2021
|
| 10 |
+
* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models
|
| 11 |
+
* Cleanup input_size/img_size override handling and testing for all vision transformer models
|
| 12 |
+
* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params.
|
| 13 |
+
|
| 14 |
+
### May 14, 2021
|
| 15 |
+
* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
|
| 16 |
+
* 1k trained variants: `tf_efficientnetv2_s/m/l`
|
| 17 |
+
* 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
|
| 18 |
+
* 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
|
| 19 |
+
* v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
|
| 20 |
+
* Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
|
| 21 |
+
* Some blank `efficientnetv2_*` models in-place for future native PyTorch training
|
| 22 |
+
|
| 23 |
+
### May 5, 2021
|
| 24 |
+
* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
|
| 25 |
+
* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
|
| 26 |
+
* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora)
|
| 27 |
+
* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin)
|
| 28 |
+
* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23)
|
| 29 |
+
* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai)
|
| 30 |
+
* Update ByoaNet attention modules
|
| 31 |
+
* Improve SA module inits
|
| 32 |
+
* Hack together experimental stand-alone Swin based attn module and `swinnet`
|
| 33 |
+
* Consistent '26t' model defs for experiments.
|
| 34 |
+
* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1.
|
| 35 |
+
* WandB logging support
|
| 36 |
+
|
| 37 |
+
### April 13, 2021
|
| 38 |
+
* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer
|
| 39 |
+
|
| 40 |
+
### April 12, 2021
|
| 41 |
+
* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256.
|
| 42 |
+
* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
|
| 43 |
+
* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs
|
| 44 |
+
* Lambda Networks - https://arxiv.org/abs/2102.08602
|
| 45 |
+
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
|
| 46 |
+
* Halo Nets - https://arxiv.org/abs/2103.12731
|
| 47 |
+
* Adabelief optimizer contributed by Juntang Zhuang
|
| 48 |
+
|
| 49 |
+
### April 1, 2021
|
| 50 |
+
* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference
|
| 51 |
+
* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit)
|
| 52 |
+
* Merged distilled variant into main for torchscript compatibility
|
| 53 |
+
* Some `timm` cleanup/style tweaks and weights have hub download support
|
| 54 |
+
* Cleanup Vision Transformer (ViT) models
|
| 55 |
+
* Merge distilled (DeiT) model into main so that torchscript can work
|
| 56 |
+
* Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch)
|
| 57 |
+
* Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids
|
| 58 |
+
* Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants
|
| 59 |
+
* nn.Sequential for block stack (does not break downstream compat)
|
| 60 |
+
* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT)
|
| 61 |
+
* Add RegNetY-160 weights from DeiT teacher model
|
| 62 |
+
* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288
|
| 63 |
+
* Some fixes/improvements for TFDS dataset wrapper
|
| 64 |
+
|
| 65 |
+
### March 7, 2021
|
| 66 |
+
* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc).
|
| 67 |
+
* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation.
|
| 68 |
+
|
| 69 |
+
### Feb 18, 2021
|
| 70 |
+
* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets).
|
| 71 |
+
* Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn.
|
| 72 |
+
* These models are big, expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants.
|
| 73 |
+
* Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated).
|
| 74 |
+
* Matching the original pre-processing as closely as possible I get these results:
|
| 75 |
+
* `dm_nfnet_f6` - 86.352
|
| 76 |
+
* `dm_nfnet_f5` - 86.100
|
| 77 |
+
* `dm_nfnet_f4` - 85.834
|
| 78 |
+
* `dm_nfnet_f3` - 85.676
|
| 79 |
+
* `dm_nfnet_f2` - 85.178
|
| 80 |
+
* `dm_nfnet_f1` - 84.696
|
| 81 |
+
* `dm_nfnet_f0` - 83.464
|
| 82 |
+
|
| 83 |
+
### Feb 16, 2021
|
| 84 |
+
* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py.
|
| 85 |
+
* AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc`
|
| 86 |
+
* PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0`
|
| 87 |
+
* PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value`
|
| 88 |
+
* AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet.
|
| 89 |
+
|
| 90 |
+
### Feb 12, 2021
|
| 91 |
+
* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs
|
| 92 |
+
|
| 93 |
+
### Feb 10, 2021
|
| 94 |
+
* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks')
|
| 95 |
+
* GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py`
|
| 96 |
+
* RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py`
|
| 97 |
+
* classic VGG (from torchvision, impl in `vgg`)
|
| 98 |
+
* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models
|
| 99 |
+
* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not.
|
| 100 |
+
* Fix a few bugs introduced since last pypi release
|
| 101 |
+
|
| 102 |
+
### Feb 8, 2021
|
| 103 |
+
* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352.
|
| 104 |
+
* `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256
|
| 105 |
+
* `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256
|
| 106 |
+
* `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320
|
| 107 |
+
* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed).
|
| 108 |
+
* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test.
|
| 109 |
+
|
| 110 |
+
### Jan 30, 2021
|
| 111 |
+
* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)
|
| 112 |
+
|
| 113 |
+
### Jan 25, 2021
|
| 114 |
+
* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer
|
| 115 |
+
* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer
|
| 116 |
+
* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support
|
| 117 |
+
* NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning
|
| 118 |
+
* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit
|
| 119 |
+
* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes
|
| 120 |
+
* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script
|
| 121 |
+
* Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2`
|
| 122 |
+
* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar
|
| 123 |
+
* Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp`
|
| 124 |
+
* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling
|
| 125 |
+
|
| 126 |
+
### Jan 3, 2021
|
| 127 |
+
* Add SE-ResNet-152D weights
|
| 128 |
+
* 256x256 val, 0.94 crop top-1 - 83.75
|
| 129 |
+
* 320x320 val, 1.0 crop - 84.36
|
| 130 |
+
* Update results files
|
testbed/huggingface__pytorch-image-models/docs/feature_extraction.md
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Feature Extraction
|
| 2 |
+
|
| 3 |
+
All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification.
|
| 4 |
+
|
| 5 |
+
## Penultimate Layer Features (Pre-Classifier Features)
|
| 6 |
+
|
| 7 |
+
The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features.
|
| 8 |
+
|
| 9 |
+
### Unpooled
|
| 10 |
+
|
| 11 |
+
There are three ways to obtain unpooled features.
|
| 12 |
+
|
| 13 |
+
Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling for networks.
|
| 14 |
+
|
| 15 |
+
If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network.
|
| 16 |
+
|
| 17 |
+
#### forward_features()
|
| 18 |
+
```python hl_lines="3 6"
|
| 19 |
+
import torch
|
| 20 |
+
import timm
|
| 21 |
+
m = timm.create_model('xception41', pretrained=True)
|
| 22 |
+
o = m(torch.randn(2, 3, 299, 299))
|
| 23 |
+
print(f'Original shape: {o.shape}')
|
| 24 |
+
o = m.forward_features(torch.randn(2, 3, 299, 299))
|
| 25 |
+
print(f'Unpooled shape: {o.shape}')
|
| 26 |
+
```
|
| 27 |
+
Output:
|
| 28 |
+
```text
|
| 29 |
+
Original shape: torch.Size([2, 1000])
|
| 30 |
+
Unpooled shape: torch.Size([2, 2048, 10, 10])
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
#### Create with no classifier and pooling
|
| 34 |
+
```python hl_lines="3"
|
| 35 |
+
import torch
|
| 36 |
+
import timm
|
| 37 |
+
m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
|
| 38 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 39 |
+
print(f'Unpooled shape: {o.shape}')
|
| 40 |
+
```
|
| 41 |
+
Output:
|
| 42 |
+
```text
|
| 43 |
+
Unpooled shape: torch.Size([2, 2048, 7, 7])
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
#### Remove it later
|
| 47 |
+
```python hl_lines="3 6"
|
| 48 |
+
import torch
|
| 49 |
+
import timm
|
| 50 |
+
m = timm.create_model('densenet121', pretrained=True)
|
| 51 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 52 |
+
print(f'Original shape: {o.shape}')
|
| 53 |
+
m.reset_classifier(0, '')
|
| 54 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 55 |
+
print(f'Unpooled shape: {o.shape}')
|
| 56 |
+
```
|
| 57 |
+
Output:
|
| 58 |
+
```text
|
| 59 |
+
Original shape: torch.Size([2, 1000])
|
| 60 |
+
Unpooled shape: torch.Size([2, 1024, 7, 7])
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
### Pooled
|
| 64 |
+
|
| 65 |
+
To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact.
|
| 66 |
+
|
| 67 |
+
#### Create with no classifier
|
| 68 |
+
```python hl_lines="3"
|
| 69 |
+
import torch
|
| 70 |
+
import timm
|
| 71 |
+
m = timm.create_model('resnet50', pretrained=True, num_classes=0)
|
| 72 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 73 |
+
print(f'Pooled shape: {o.shape}')
|
| 74 |
+
```
|
| 75 |
+
Output:
|
| 76 |
+
```text
|
| 77 |
+
Pooled shape: torch.Size([2, 2048])
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
#### Remove it later
|
| 81 |
+
```python hl_lines="3 6"
|
| 82 |
+
import torch
|
| 83 |
+
import timm
|
| 84 |
+
m = timm.create_model('ese_vovnet19b_dw', pretrained=True)
|
| 85 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 86 |
+
print(f'Original shape: {o.shape}')
|
| 87 |
+
m.reset_classifier(0)
|
| 88 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 89 |
+
print(f'Pooled shape: {o.shape}')
|
| 90 |
+
```
|
| 91 |
+
Output:
|
| 92 |
+
```text
|
| 93 |
+
Pooled shape: torch.Size([2, 1024])
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
## Multi-scale Feature Maps (Feature Pyramid)
|
| 98 |
+
|
| 99 |
+
Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library.
|
| 100 |
+
|
| 101 |
+
`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels.
|
| 102 |
+
|
| 103 |
+
A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default 5 strides will be output from most models (not all have that many), with the first starting at 2 (some start at 1 or 4).
|
| 104 |
+
|
| 105 |
+
### Create a feature map extraction model
|
| 106 |
+
```python hl_lines="3"
|
| 107 |
+
import torch
|
| 108 |
+
import timm
|
| 109 |
+
m = timm.create_model('resnest26d', features_only=True, pretrained=True)
|
| 110 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 111 |
+
for x in o:
|
| 112 |
+
print(x.shape)
|
| 113 |
+
```
|
| 114 |
+
Output:
|
| 115 |
+
```text
|
| 116 |
+
torch.Size([2, 64, 112, 112])
|
| 117 |
+
torch.Size([2, 256, 56, 56])
|
| 118 |
+
torch.Size([2, 512, 28, 28])
|
| 119 |
+
torch.Size([2, 1024, 14, 14])
|
| 120 |
+
torch.Size([2, 2048, 7, 7])
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
### Query the feature information
|
| 124 |
+
|
| 125 |
+
After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points.
|
| 126 |
+
|
| 127 |
+
```python hl_lines="3 4"
|
| 128 |
+
import torch
|
| 129 |
+
import timm
|
| 130 |
+
m = timm.create_model('regnety_032', features_only=True, pretrained=True)
|
| 131 |
+
print(f'Feature channels: {m.feature_info.channels()}')
|
| 132 |
+
o = m(torch.randn(2, 3, 224, 224))
|
| 133 |
+
for x in o:
|
| 134 |
+
print(x.shape)
|
| 135 |
+
```
|
| 136 |
+
Output:
|
| 137 |
+
```text
|
| 138 |
+
Feature channels: [32, 72, 216, 576, 1512]
|
| 139 |
+
torch.Size([2, 32, 112, 112])
|
| 140 |
+
torch.Size([2, 72, 56, 56])
|
| 141 |
+
torch.Size([2, 216, 28, 28])
|
| 142 |
+
torch.Size([2, 576, 14, 14])
|
| 143 |
+
torch.Size([2, 1512, 7, 7])
|
| 144 |
+
```
|
| 145 |
+
|
| 146 |
+
### Select specific feature levels or limit the stride
|
| 147 |
+
|
| 148 |
+
There are two additional creation arguments impacting the output features.
|
| 149 |
+
|
| 150 |
+
* `out_indices` selects which indices to output
|
| 151 |
+
* `output_stride` limits the feature output stride of the network (also works in classification mode BTW)
|
| 152 |
+
|
| 153 |
+
`out_indices` is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check feature_info to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most models, index 0 is the stride 2 features, and index 4 is stride 32.
|
| 154 |
+
|
| 155 |
+
`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward, some networks only support `output_stride=32`.
|
| 156 |
+
|
| 157 |
+
```python hl_lines="3 4 5"
|
| 158 |
+
import torch
|
| 159 |
+
import timm
|
| 160 |
+
m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True)
|
| 161 |
+
print(f'Feature channels: {m.feature_info.channels()}')
|
| 162 |
+
print(f'Feature reduction: {m.feature_info.reduction()}')
|
| 163 |
+
o = m(torch.randn(2, 3, 320, 320))
|
| 164 |
+
for x in o:
|
| 165 |
+
print(x.shape)
|
| 166 |
+
```
|
| 167 |
+
Output:
|
| 168 |
+
```text
|
| 169 |
+
Feature channels: [512, 2048]
|
| 170 |
+
Feature reduction: [8, 8]
|
| 171 |
+
torch.Size([2, 512, 40, 40])
|
| 172 |
+
torch.Size([2, 2048, 40, 40])
|
| 173 |
+
```
|
testbed/huggingface__pytorch-image-models/docs/index.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Getting Started
|
| 2 |
+
|
| 3 |
+
## Welcome
|
| 4 |
+
|
| 5 |
+
Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`.
|
| 6 |
+
|
| 7 |
+
For a more comprehensive set of docs (currently under development), please visit [timmdocs](https://fastai.github.io/timmdocs/) by [Aman Arora](https://github.com/amaarora).
|
| 8 |
+
|
| 9 |
+
## Install
|
| 10 |
+
|
| 11 |
+
The library can be installed with pip:
|
| 12 |
+
|
| 13 |
+
```
|
| 14 |
+
pip install timm
|
| 15 |
+
```
|
| 16 |
+
|
| 17 |
+
I update the PyPi (pip) packages when I'm confident there are no significant model regressions from previous releases. If you want to pip install the bleeding edge from GitHub, use:
|
| 18 |
+
```
|
| 19 |
+
pip install git+https://github.com/rwightman/pytorch-image-models.git
|
| 20 |
+
```
|
| 21 |
+
|
| 22 |
+
!!! info "Conda Environment"
|
| 23 |
+
All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.6.x, 3.7.x., 3.8.x., 3.9
|
| 24 |
+
|
| 25 |
+
Little to no care has been taken to be Python 2.x friendly and will not support it. If you run into any challenges running on Windows, or other OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read Conda) environment.
|
| 26 |
+
|
| 27 |
+
PyTorch versions 1.4, 1.5.x, 1.6, 1.7.x, and 1.8 have been tested with this code.
|
| 28 |
+
|
| 29 |
+
I've tried to keep the dependencies minimal, the setup is as per the PyTorch default install instructions for Conda:
|
| 30 |
+
```
|
| 31 |
+
conda create -n torch-env
|
| 32 |
+
conda activate torch-env
|
| 33 |
+
conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c conda-forge
|
| 34 |
+
conda install pyyaml
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## Load a Pretrained Model
|
| 38 |
+
|
| 39 |
+
Pretrained models can be loaded using `timm.create_model`
|
| 40 |
+
|
| 41 |
+
```python
|
| 42 |
+
import timm
|
| 43 |
+
|
| 44 |
+
m = timm.create_model('mobilenetv3_large_100', pretrained=True)
|
| 45 |
+
m.eval()
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
## List Models with Pretrained Weights
|
| 49 |
+
```python
|
| 50 |
+
import timm
|
| 51 |
+
from pprint import pprint
|
| 52 |
+
model_names = timm.list_models(pretrained=True)
|
| 53 |
+
pprint(model_names)
|
| 54 |
+
>>> ['adv_inception_v3',
|
| 55 |
+
'cspdarknet53',
|
| 56 |
+
'cspresnext50',
|
| 57 |
+
'densenet121',
|
| 58 |
+
'densenet161',
|
| 59 |
+
'densenet169',
|
| 60 |
+
'densenet201',
|
| 61 |
+
'densenetblur121d',
|
| 62 |
+
'dla34',
|
| 63 |
+
'dla46_c',
|
| 64 |
+
...
|
| 65 |
+
]
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## List Model Architectures by Wildcard
|
| 69 |
+
```python
|
| 70 |
+
import timm
|
| 71 |
+
from pprint import pprint
|
| 72 |
+
model_names = timm.list_models('*resne*t*')
|
| 73 |
+
pprint(model_names)
|
| 74 |
+
>>> ['cspresnet50',
|
| 75 |
+
'cspresnet50d',
|
| 76 |
+
'cspresnet50w',
|
| 77 |
+
'cspresnext50',
|
| 78 |
+
...
|
| 79 |
+
]
|
| 80 |
+
```
|
testbed/huggingface__pytorch-image-models/docs/models.md
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Model Summaries
|
| 2 |
+
|
| 3 |
+
The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below.
|
| 4 |
+
|
| 5 |
+
Most included models have pretrained weights. The weights are either:
|
| 6 |
+
|
| 7 |
+
1. from their original sources
|
| 8 |
+
2. ported by myself from their original impl in a different framework (e.g. Tensorflow models)
|
| 9 |
+
3. trained from scratch using the included training script
|
| 10 |
+
|
| 11 |
+
The validation results for the pretrained weights are [here](results.md)
|
| 12 |
+
|
| 13 |
+
A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm).
|
| 14 |
+
|
| 15 |
+
## Big Transfer ResNetV2 (BiT) [[resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)]
|
| 16 |
+
* Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370
|
| 17 |
+
* Reference code: https://github.com/google-research/big_transfer
|
| 18 |
+
|
| 19 |
+
## Cross-Stage Partial Networks [[cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)]
|
| 20 |
+
* Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
|
| 21 |
+
* Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks
|
| 22 |
+
|
| 23 |
+
## DenseNet [[densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)]
|
| 24 |
+
* Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993
|
| 25 |
+
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
|
| 26 |
+
|
| 27 |
+
## DLA [[dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)]
|
| 28 |
+
* Paper: https://arxiv.org/abs/1707.06484
|
| 29 |
+
* Code: https://github.com/ucbdrive/dla
|
| 30 |
+
|
| 31 |
+
## Dual-Path Networks [[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)]
|
| 32 |
+
* Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629
|
| 33 |
+
* My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained
|
| 34 |
+
* Reference code: https://github.com/cypw/DPNs
|
| 35 |
+
|
| 36 |
+
## GPU-Efficient Networks [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)]
|
| 37 |
+
* Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
|
| 38 |
+
* Reference code: https://github.com/idstcv/GPU-Efficient-Networks
|
| 39 |
+
|
| 40 |
+
## HRNet [[hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)]
|
| 41 |
+
* Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919
|
| 42 |
+
* Code: https://github.com/HRNet/HRNet-Image-Classification
|
| 43 |
+
|
| 44 |
+
## Inception-V3 [[inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)]
|
| 45 |
+
* Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567
|
| 46 |
+
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
|
| 47 |
+
|
| 48 |
+
## Inception-V4 [[inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)]
|
| 49 |
+
* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
|
| 50 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 51 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
|
| 52 |
+
|
| 53 |
+
## Inception-ResNet-V2 [[inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)]
|
| 54 |
+
* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
|
| 55 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 56 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
|
| 57 |
+
|
| 58 |
+
## NASNet-A [[nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)]
|
| 59 |
+
* Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012
|
| 60 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 61 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
|
| 62 |
+
|
| 63 |
+
## PNasNet-5 [[pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)]
|
| 64 |
+
* Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559
|
| 65 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 66 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
|
| 67 |
+
|
| 68 |
+
## EfficientNet [[efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)]
|
| 69 |
+
|
| 70 |
+
* Papers:
|
| 71 |
+
* EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
|
| 72 |
+
* EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
|
| 73 |
+
* EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
|
| 74 |
+
* EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
|
| 75 |
+
* MixNet - https://arxiv.org/abs/1907.09595
|
| 76 |
+
* MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
|
| 77 |
+
* MobileNet-V2 - https://arxiv.org/abs/1801.04381
|
| 78 |
+
* FBNet-C - https://arxiv.org/abs/1812.03443
|
| 79 |
+
* Single-Path NAS - https://arxiv.org/abs/1904.02877
|
| 80 |
+
* My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch
|
| 81 |
+
* Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
|
| 82 |
+
|
| 83 |
+
## MobileNet-V3 [[mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)]
|
| 84 |
+
* Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
|
| 85 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
|
| 86 |
+
|
| 87 |
+
## RegNet [[regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)]
|
| 88 |
+
* Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678
|
| 89 |
+
* Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
|
| 90 |
+
|
| 91 |
+
## RepVGG [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)]
|
| 92 |
+
* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
|
| 93 |
+
* Reference code: https://github.com/DingXiaoH/RepVGG
|
| 94 |
+
|
| 95 |
+
## ResNet, ResNeXt [[resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)]
|
| 96 |
+
|
| 97 |
+
* ResNet (V1B)
|
| 98 |
+
* Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385
|
| 99 |
+
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
|
| 100 |
+
* ResNeXt
|
| 101 |
+
* Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431
|
| 102 |
+
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
|
| 103 |
+
* 'Bag of Tricks' / Gluon C, D, E, S ResNet variants
|
| 104 |
+
* Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187
|
| 105 |
+
* Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py
|
| 106 |
+
* Instagram pretrained / ImageNet tuned ResNeXt101
|
| 107 |
+
* Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932
|
| 108 |
+
* Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
|
| 109 |
+
* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts
|
| 110 |
+
* Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546
|
| 111 |
+
* Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
|
| 112 |
+
* Squeeze-and-Excitation Networks
|
| 113 |
+
* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
|
| 114 |
+
* Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated
|
| 115 |
+
* ECAResNet (ECA-Net)
|
| 116 |
+
* Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4
|
| 117 |
+
* Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet
|
| 118 |
+
|
| 119 |
+
## Res2Net [[res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)]
|
| 120 |
+
* Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
|
| 121 |
+
* Code: https://github.com/gasvn/Res2Net
|
| 122 |
+
|
| 123 |
+
## ResNeSt [[resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)]
|
| 124 |
+
* Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
|
| 125 |
+
* Code: https://github.com/zhanghang1989/ResNeSt
|
| 126 |
+
|
| 127 |
+
## ReXNet [[rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)]
|
| 128 |
+
* Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992
|
| 129 |
+
* Code: https://github.com/clovaai/rexnet
|
| 130 |
+
|
| 131 |
+
## Selective-Kernel Networks [[sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)]
|
| 132 |
+
* Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586
|
| 133 |
+
* Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn
|
| 134 |
+
|
| 135 |
+
## SelecSLS [[selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)]
|
| 136 |
+
* Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837
|
| 137 |
+
* Code: https://github.com/mehtadushy/SelecSLS-Pytorch
|
| 138 |
+
|
| 139 |
+
## Squeeze-and-Excitation Networks [[senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)]
|
| 140 |
+
NOTE: I am deprecating this version of the networks, the new ones are part of `resnet.py`
|
| 141 |
+
|
| 142 |
+
* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
|
| 143 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 144 |
+
|
| 145 |
+
## TResNet [[tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)]
|
| 146 |
+
* Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630
|
| 147 |
+
* Code: https://github.com/mrT23/TResNet
|
| 148 |
+
|
| 149 |
+
## VGG [[vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)]
|
| 150 |
+
* Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf
|
| 151 |
+
* Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
|
| 152 |
+
|
| 153 |
+
## Vision Transformer [[vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)]
|
| 154 |
+
* Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929
|
| 155 |
+
* Reference code and pretrained weights: https://github.com/google-research/vision_transformer
|
| 156 |
+
|
| 157 |
+
## VovNet V2 and V1 [[vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)]
|
| 158 |
+
* Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
|
| 159 |
+
* Reference code: https://github.com/youngwanLEE/vovnet-detectron2
|
| 160 |
+
|
| 161 |
+
## Xception [[xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)]
|
| 162 |
+
* Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357
|
| 163 |
+
* Code: https://github.com/Cadene/pretrained-models.pytorch
|
| 164 |
+
|
| 165 |
+
## Xception (Modified Aligned, Gluon) [[gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)]
|
| 166 |
+
* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
|
| 167 |
+
* Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/
|
| 168 |
+
|
| 169 |
+
## Xception (Modified Aligned, TF) [[aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)]
|
| 170 |
+
* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
|
| 171 |
+
* Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
|
testbed/huggingface__pytorch-image-models/docs/models/.templates/models/advprop.md
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AdvProp (EfficientNet)
|
| 2 |
+
|
| 3 |
+
**AdvProp** is an adversarial training scheme which treats adversarial examples as additional training examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions from normal examples.
|
| 4 |
+
|
| 5 |
+
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
|
| 6 |
+
|
| 7 |
+
{% include 'code_snippets.md' %}
|
| 8 |
+
|
| 9 |
+
## How do I train this model?
|
| 10 |
+
|
| 11 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 12 |
+
|
| 13 |
+
## Citation
|
| 14 |
+
|
| 15 |
+
```BibTeX
|
| 16 |
+
@misc{xie2020adversarial,
|
| 17 |
+
title={Adversarial Examples Improve Image Recognition},
|
| 18 |
+
author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le},
|
| 19 |
+
year={2020},
|
| 20 |
+
eprint={1911.09665},
|
| 21 |
+
archivePrefix={arXiv},
|
| 22 |
+
primaryClass={cs.CV}
|
| 23 |
+
}
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
<!--
|
| 27 |
+
Type: model-index
|
| 28 |
+
Collections:
|
| 29 |
+
- Name: AdvProp
|
| 30 |
+
Paper:
|
| 31 |
+
Title: Adversarial Examples Improve Image Recognition
|
| 32 |
+
URL: https://paperswithcode.com/paper/adversarial-examples-improve-image
|
| 33 |
+
Models:
|
| 34 |
+
- Name: tf_efficientnet_b0_ap
|
| 35 |
+
In Collection: AdvProp
|
| 36 |
+
Metadata:
|
| 37 |
+
FLOPs: 488688572
|
| 38 |
+
Parameters: 5290000
|
| 39 |
+
File Size: 21385973
|
| 40 |
+
Architecture:
|
| 41 |
+
- 1x1 Convolution
|
| 42 |
+
- Average Pooling
|
| 43 |
+
- Batch Normalization
|
| 44 |
+
- Convolution
|
| 45 |
+
- Dense Connections
|
| 46 |
+
- Dropout
|
| 47 |
+
- Inverted Residual Block
|
| 48 |
+
- Squeeze-and-Excitation Block
|
| 49 |
+
- Swish
|
| 50 |
+
Tasks:
|
| 51 |
+
- Image Classification
|
| 52 |
+
Training Techniques:
|
| 53 |
+
- AdvProp
|
| 54 |
+
- AutoAugment
|
| 55 |
+
- Label Smoothing
|
| 56 |
+
- RMSProp
|
| 57 |
+
- Stochastic Depth
|
| 58 |
+
- Weight Decay
|
| 59 |
+
Training Data:
|
| 60 |
+
- ImageNet
|
| 61 |
+
ID: tf_efficientnet_b0_ap
|
| 62 |
+
LR: 0.256
|
| 63 |
+
Epochs: 350
|
| 64 |
+
Crop Pct: '0.875'
|
| 65 |
+
Momentum: 0.9
|
| 66 |
+
Batch Size: 2048
|
| 67 |
+
Image Size: '224'
|
| 68 |
+
Weight Decay: 1.0e-05
|
| 69 |
+
Interpolation: bicubic
|
| 70 |
+
RMSProp Decay: 0.9
|
| 71 |
+
Label Smoothing: 0.1
|
| 72 |
+
BatchNorm Momentum: 0.99
|
| 73 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334
|
| 74 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth
|
| 75 |
+
Results:
|
| 76 |
+
- Task: Image Classification
|
| 77 |
+
Dataset: ImageNet
|
| 78 |
+
Metrics:
|
| 79 |
+
Top 1 Accuracy: 77.1%
|
| 80 |
+
Top 5 Accuracy: 93.26%
|
| 81 |
+
- Name: tf_efficientnet_b1_ap
|
| 82 |
+
In Collection: AdvProp
|
| 83 |
+
Metadata:
|
| 84 |
+
FLOPs: 883633200
|
| 85 |
+
Parameters: 7790000
|
| 86 |
+
File Size: 31515350
|
| 87 |
+
Architecture:
|
| 88 |
+
- 1x1 Convolution
|
| 89 |
+
- Average Pooling
|
| 90 |
+
- Batch Normalization
|
| 91 |
+
- Convolution
|
| 92 |
+
- Dense Connections
|
| 93 |
+
- Dropout
|
| 94 |
+
- Inverted Residual Block
|
| 95 |
+
- Squeeze-and-Excitation Block
|
| 96 |
+
- Swish
|
| 97 |
+
Tasks:
|
| 98 |
+
- Image Classification
|
| 99 |
+
Training Techniques:
|
| 100 |
+
- AdvProp
|
| 101 |
+
- AutoAugment
|
| 102 |
+
- Label Smoothing
|
| 103 |
+
- RMSProp
|
| 104 |
+
- Stochastic Depth
|
| 105 |
+
- Weight Decay
|
| 106 |
+
Training Data:
|
| 107 |
+
- ImageNet
|
| 108 |
+
ID: tf_efficientnet_b1_ap
|
| 109 |
+
LR: 0.256
|
| 110 |
+
Epochs: 350
|
| 111 |
+
Crop Pct: '0.882'
|
| 112 |
+
Momentum: 0.9
|
| 113 |
+
Batch Size: 2048
|
| 114 |
+
Image Size: '240'
|
| 115 |
+
Weight Decay: 1.0e-05
|
| 116 |
+
Interpolation: bicubic
|
| 117 |
+
RMSProp Decay: 0.9
|
| 118 |
+
Label Smoothing: 0.1
|
| 119 |
+
BatchNorm Momentum: 0.99
|
| 120 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344
|
| 121 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth
|
| 122 |
+
Results:
|
| 123 |
+
- Task: Image Classification
|
| 124 |
+
Dataset: ImageNet
|
| 125 |
+
Metrics:
|
| 126 |
+
Top 1 Accuracy: 79.28%
|
| 127 |
+
Top 5 Accuracy: 94.3%
|
| 128 |
+
- Name: tf_efficientnet_b2_ap
|
| 129 |
+
In Collection: AdvProp
|
| 130 |
+
Metadata:
|
| 131 |
+
FLOPs: 1234321170
|
| 132 |
+
Parameters: 9110000
|
| 133 |
+
File Size: 36800745
|
| 134 |
+
Architecture:
|
| 135 |
+
- 1x1 Convolution
|
| 136 |
+
- Average Pooling
|
| 137 |
+
- Batch Normalization
|
| 138 |
+
- Convolution
|
| 139 |
+
- Dense Connections
|
| 140 |
+
- Dropout
|
| 141 |
+
- Inverted Residual Block
|
| 142 |
+
- Squeeze-and-Excitation Block
|
| 143 |
+
- Swish
|
| 144 |
+
Tasks:
|
| 145 |
+
- Image Classification
|
| 146 |
+
Training Techniques:
|
| 147 |
+
- AdvProp
|
| 148 |
+
- AutoAugment
|
| 149 |
+
- Label Smoothing
|
| 150 |
+
- RMSProp
|
| 151 |
+
- Stochastic Depth
|
| 152 |
+
- Weight Decay
|
| 153 |
+
Training Data:
|
| 154 |
+
- ImageNet
|
| 155 |
+
ID: tf_efficientnet_b2_ap
|
| 156 |
+
LR: 0.256
|
| 157 |
+
Epochs: 350
|
| 158 |
+
Crop Pct: '0.89'
|
| 159 |
+
Momentum: 0.9
|
| 160 |
+
Batch Size: 2048
|
| 161 |
+
Image Size: '260'
|
| 162 |
+
Weight Decay: 1.0e-05
|
| 163 |
+
Interpolation: bicubic
|
| 164 |
+
RMSProp Decay: 0.9
|
| 165 |
+
Label Smoothing: 0.1
|
| 166 |
+
BatchNorm Momentum: 0.99
|
| 167 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354
|
| 168 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth
|
| 169 |
+
Results:
|
| 170 |
+
- Task: Image Classification
|
| 171 |
+
Dataset: ImageNet
|
| 172 |
+
Metrics:
|
| 173 |
+
Top 1 Accuracy: 80.3%
|
| 174 |
+
Top 5 Accuracy: 95.03%
|
| 175 |
+
- Name: tf_efficientnet_b3_ap
|
| 176 |
+
In Collection: AdvProp
|
| 177 |
+
Metadata:
|
| 178 |
+
FLOPs: 2275247568
|
| 179 |
+
Parameters: 12230000
|
| 180 |
+
File Size: 49384538
|
| 181 |
+
Architecture:
|
| 182 |
+
- 1x1 Convolution
|
| 183 |
+
- Average Pooling
|
| 184 |
+
- Batch Normalization
|
| 185 |
+
- Convolution
|
| 186 |
+
- Dense Connections
|
| 187 |
+
- Dropout
|
| 188 |
+
- Inverted Residual Block
|
| 189 |
+
- Squeeze-and-Excitation Block
|
| 190 |
+
- Swish
|
| 191 |
+
Tasks:
|
| 192 |
+
- Image Classification
|
| 193 |
+
Training Techniques:
|
| 194 |
+
- AdvProp
|
| 195 |
+
- AutoAugment
|
| 196 |
+
- Label Smoothing
|
| 197 |
+
- RMSProp
|
| 198 |
+
- Stochastic Depth
|
| 199 |
+
- Weight Decay
|
| 200 |
+
Training Data:
|
| 201 |
+
- ImageNet
|
| 202 |
+
ID: tf_efficientnet_b3_ap
|
| 203 |
+
LR: 0.256
|
| 204 |
+
Epochs: 350
|
| 205 |
+
Crop Pct: '0.904'
|
| 206 |
+
Momentum: 0.9
|
| 207 |
+
Batch Size: 2048
|
| 208 |
+
Image Size: '300'
|
| 209 |
+
Weight Decay: 1.0e-05
|
| 210 |
+
Interpolation: bicubic
|
| 211 |
+
RMSProp Decay: 0.9
|
| 212 |
+
Label Smoothing: 0.1
|
| 213 |
+
BatchNorm Momentum: 0.99
|
| 214 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364
|
| 215 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth
|
| 216 |
+
Results:
|
| 217 |
+
- Task: Image Classification
|
| 218 |
+
Dataset: ImageNet
|
| 219 |
+
Metrics:
|
| 220 |
+
Top 1 Accuracy: 81.82%
|
| 221 |
+
Top 5 Accuracy: 95.62%
|
| 222 |
+
- Name: tf_efficientnet_b4_ap
|
| 223 |
+
In Collection: AdvProp
|
| 224 |
+
Metadata:
|
| 225 |
+
FLOPs: 5749638672
|
| 226 |
+
Parameters: 19340000
|
| 227 |
+
File Size: 77993585
|
| 228 |
+
Architecture:
|
| 229 |
+
- 1x1 Convolution
|
| 230 |
+
- Average Pooling
|
| 231 |
+
- Batch Normalization
|
| 232 |
+
- Convolution
|
| 233 |
+
- Dense Connections
|
| 234 |
+
- Dropout
|
| 235 |
+
- Inverted Residual Block
|
| 236 |
+
- Squeeze-and-Excitation Block
|
| 237 |
+
- Swish
|
| 238 |
+
Tasks:
|
| 239 |
+
- Image Classification
|
| 240 |
+
Training Techniques:
|
| 241 |
+
- AdvProp
|
| 242 |
+
- AutoAugment
|
| 243 |
+
- Label Smoothing
|
| 244 |
+
- RMSProp
|
| 245 |
+
- Stochastic Depth
|
| 246 |
+
- Weight Decay
|
| 247 |
+
Training Data:
|
| 248 |
+
- ImageNet
|
| 249 |
+
ID: tf_efficientnet_b4_ap
|
| 250 |
+
LR: 0.256
|
| 251 |
+
Epochs: 350
|
| 252 |
+
Crop Pct: '0.922'
|
| 253 |
+
Momentum: 0.9
|
| 254 |
+
Batch Size: 2048
|
| 255 |
+
Image Size: '380'
|
| 256 |
+
Weight Decay: 1.0e-05
|
| 257 |
+
Interpolation: bicubic
|
| 258 |
+
RMSProp Decay: 0.9
|
| 259 |
+
Label Smoothing: 0.1
|
| 260 |
+
BatchNorm Momentum: 0.99
|
| 261 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374
|
| 262 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth
|
| 263 |
+
Results:
|
| 264 |
+
- Task: Image Classification
|
| 265 |
+
Dataset: ImageNet
|
| 266 |
+
Metrics:
|
| 267 |
+
Top 1 Accuracy: 83.26%
|
| 268 |
+
Top 5 Accuracy: 96.39%
|
| 269 |
+
- Name: tf_efficientnet_b5_ap
|
| 270 |
+
In Collection: AdvProp
|
| 271 |
+
Metadata:
|
| 272 |
+
FLOPs: 13176501888
|
| 273 |
+
Parameters: 30390000
|
| 274 |
+
File Size: 122403150
|
| 275 |
+
Architecture:
|
| 276 |
+
- 1x1 Convolution
|
| 277 |
+
- Average Pooling
|
| 278 |
+
- Batch Normalization
|
| 279 |
+
- Convolution
|
| 280 |
+
- Dense Connections
|
| 281 |
+
- Dropout
|
| 282 |
+
- Inverted Residual Block
|
| 283 |
+
- Squeeze-and-Excitation Block
|
| 284 |
+
- Swish
|
| 285 |
+
Tasks:
|
| 286 |
+
- Image Classification
|
| 287 |
+
Training Techniques:
|
| 288 |
+
- AdvProp
|
| 289 |
+
- AutoAugment
|
| 290 |
+
- Label Smoothing
|
| 291 |
+
- RMSProp
|
| 292 |
+
- Stochastic Depth
|
| 293 |
+
- Weight Decay
|
| 294 |
+
Training Data:
|
| 295 |
+
- ImageNet
|
| 296 |
+
ID: tf_efficientnet_b5_ap
|
| 297 |
+
LR: 0.256
|
| 298 |
+
Epochs: 350
|
| 299 |
+
Crop Pct: '0.934'
|
| 300 |
+
Momentum: 0.9
|
| 301 |
+
Batch Size: 2048
|
| 302 |
+
Image Size: '456'
|
| 303 |
+
Weight Decay: 1.0e-05
|
| 304 |
+
Interpolation: bicubic
|
| 305 |
+
RMSProp Decay: 0.9
|
| 306 |
+
Label Smoothing: 0.1
|
| 307 |
+
BatchNorm Momentum: 0.99
|
| 308 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384
|
| 309 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth
|
| 310 |
+
Results:
|
| 311 |
+
- Task: Image Classification
|
| 312 |
+
Dataset: ImageNet
|
| 313 |
+
Metrics:
|
| 314 |
+
Top 1 Accuracy: 84.25%
|
| 315 |
+
Top 5 Accuracy: 96.97%
|
| 316 |
+
- Name: tf_efficientnet_b6_ap
|
| 317 |
+
In Collection: AdvProp
|
| 318 |
+
Metadata:
|
| 319 |
+
FLOPs: 24180518488
|
| 320 |
+
Parameters: 43040000
|
| 321 |
+
File Size: 173237466
|
| 322 |
+
Architecture:
|
| 323 |
+
- 1x1 Convolution
|
| 324 |
+
- Average Pooling
|
| 325 |
+
- Batch Normalization
|
| 326 |
+
- Convolution
|
| 327 |
+
- Dense Connections
|
| 328 |
+
- Dropout
|
| 329 |
+
- Inverted Residual Block
|
| 330 |
+
- Squeeze-and-Excitation Block
|
| 331 |
+
- Swish
|
| 332 |
+
Tasks:
|
| 333 |
+
- Image Classification
|
| 334 |
+
Training Techniques:
|
| 335 |
+
- AdvProp
|
| 336 |
+
- AutoAugment
|
| 337 |
+
- Label Smoothing
|
| 338 |
+
- RMSProp
|
| 339 |
+
- Stochastic Depth
|
| 340 |
+
- Weight Decay
|
| 341 |
+
Training Data:
|
| 342 |
+
- ImageNet
|
| 343 |
+
ID: tf_efficientnet_b6_ap
|
| 344 |
+
LR: 0.256
|
| 345 |
+
Epochs: 350
|
| 346 |
+
Crop Pct: '0.942'
|
| 347 |
+
Momentum: 0.9
|
| 348 |
+
Batch Size: 2048
|
| 349 |
+
Image Size: '528'
|
| 350 |
+
Weight Decay: 1.0e-05
|
| 351 |
+
Interpolation: bicubic
|
| 352 |
+
RMSProp Decay: 0.9
|
| 353 |
+
Label Smoothing: 0.1
|
| 354 |
+
BatchNorm Momentum: 0.99
|
| 355 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394
|
| 356 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth
|
| 357 |
+
Results:
|
| 358 |
+
- Task: Image Classification
|
| 359 |
+
Dataset: ImageNet
|
| 360 |
+
Metrics:
|
| 361 |
+
Top 1 Accuracy: 84.79%
|
| 362 |
+
Top 5 Accuracy: 97.14%
|
| 363 |
+
- Name: tf_efficientnet_b7_ap
|
| 364 |
+
In Collection: AdvProp
|
| 365 |
+
Metadata:
|
| 366 |
+
FLOPs: 48205304880
|
| 367 |
+
Parameters: 66349999
|
| 368 |
+
File Size: 266850607
|
| 369 |
+
Architecture:
|
| 370 |
+
- 1x1 Convolution
|
| 371 |
+
- Average Pooling
|
| 372 |
+
- Batch Normalization
|
| 373 |
+
- Convolution
|
| 374 |
+
- Dense Connections
|
| 375 |
+
- Dropout
|
| 376 |
+
- Inverted Residual Block
|
| 377 |
+
- Squeeze-and-Excitation Block
|
| 378 |
+
- Swish
|
| 379 |
+
Tasks:
|
| 380 |
+
- Image Classification
|
| 381 |
+
Training Techniques:
|
| 382 |
+
- AdvProp
|
| 383 |
+
- AutoAugment
|
| 384 |
+
- Label Smoothing
|
| 385 |
+
- RMSProp
|
| 386 |
+
- Stochastic Depth
|
| 387 |
+
- Weight Decay
|
| 388 |
+
Training Data:
|
| 389 |
+
- ImageNet
|
| 390 |
+
ID: tf_efficientnet_b7_ap
|
| 391 |
+
LR: 0.256
|
| 392 |
+
Epochs: 350
|
| 393 |
+
Crop Pct: '0.949'
|
| 394 |
+
Momentum: 0.9
|
| 395 |
+
Batch Size: 2048
|
| 396 |
+
Image Size: '600'
|
| 397 |
+
Weight Decay: 1.0e-05
|
| 398 |
+
Interpolation: bicubic
|
| 399 |
+
RMSProp Decay: 0.9
|
| 400 |
+
Label Smoothing: 0.1
|
| 401 |
+
BatchNorm Momentum: 0.99
|
| 402 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405
|
| 403 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth
|
| 404 |
+
Results:
|
| 405 |
+
- Task: Image Classification
|
| 406 |
+
Dataset: ImageNet
|
| 407 |
+
Metrics:
|
| 408 |
+
Top 1 Accuracy: 85.12%
|
| 409 |
+
Top 5 Accuracy: 97.25%
|
| 410 |
+
- Name: tf_efficientnet_b8_ap
|
| 411 |
+
In Collection: AdvProp
|
| 412 |
+
Metadata:
|
| 413 |
+
FLOPs: 80962956270
|
| 414 |
+
Parameters: 87410000
|
| 415 |
+
File Size: 351412563
|
| 416 |
+
Architecture:
|
| 417 |
+
- 1x1 Convolution
|
| 418 |
+
- Average Pooling
|
| 419 |
+
- Batch Normalization
|
| 420 |
+
- Convolution
|
| 421 |
+
- Dense Connections
|
| 422 |
+
- Dropout
|
| 423 |
+
- Inverted Residual Block
|
| 424 |
+
- Squeeze-and-Excitation Block
|
| 425 |
+
- Swish
|
| 426 |
+
Tasks:
|
| 427 |
+
- Image Classification
|
| 428 |
+
Training Techniques:
|
| 429 |
+
- AdvProp
|
| 430 |
+
- AutoAugment
|
| 431 |
+
- Label Smoothing
|
| 432 |
+
- RMSProp
|
| 433 |
+
- Stochastic Depth
|
| 434 |
+
- Weight Decay
|
| 435 |
+
Training Data:
|
| 436 |
+
- ImageNet
|
| 437 |
+
ID: tf_efficientnet_b8_ap
|
| 438 |
+
LR: 0.128
|
| 439 |
+
Epochs: 350
|
| 440 |
+
Crop Pct: '0.954'
|
| 441 |
+
Momentum: 0.9
|
| 442 |
+
Batch Size: 2048
|
| 443 |
+
Image Size: '672'
|
| 444 |
+
Weight Decay: 1.0e-05
|
| 445 |
+
Interpolation: bicubic
|
| 446 |
+
RMSProp Decay: 0.9
|
| 447 |
+
Label Smoothing: 0.1
|
| 448 |
+
BatchNorm Momentum: 0.99
|
| 449 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416
|
| 450 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth
|
| 451 |
+
Results:
|
| 452 |
+
- Task: Image Classification
|
| 453 |
+
Dataset: ImageNet
|
| 454 |
+
Metrics:
|
| 455 |
+
Top 1 Accuracy: 85.37%
|
| 456 |
+
Top 5 Accuracy: 97.3%
|
| 457 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/.templates/models/big-transfer.md
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Big Transfer (BiT)
|
| 2 |
+
|
| 3 |
+
**Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The fine-tuned models contained in this collection are fine-tuned on ImageNet.
|
| 4 |
+
|
| 5 |
+
{% include 'code_snippets.md' %}
|
| 6 |
+
|
| 7 |
+
## How do I train this model?
|
| 8 |
+
|
| 9 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 10 |
+
|
| 11 |
+
## Citation
|
| 12 |
+
|
| 13 |
+
```BibTeX
|
| 14 |
+
@misc{kolesnikov2020big,
|
| 15 |
+
title={Big Transfer (BiT): General Visual Representation Learning},
|
| 16 |
+
author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby},
|
| 17 |
+
year={2020},
|
| 18 |
+
eprint={1912.11370},
|
| 19 |
+
archivePrefix={arXiv},
|
| 20 |
+
primaryClass={cs.CV}
|
| 21 |
+
}
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
<!--
|
| 25 |
+
Type: model-index
|
| 26 |
+
Collections:
|
| 27 |
+
- Name: Big Transfer
|
| 28 |
+
Paper:
|
| 29 |
+
Title: 'Big Transfer (BiT): General Visual Representation Learning'
|
| 30 |
+
URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual
|
| 31 |
+
Models:
|
| 32 |
+
- Name: resnetv2_101x1_bitm
|
| 33 |
+
In Collection: Big Transfer
|
| 34 |
+
Metadata:
|
| 35 |
+
FLOPs: 5330896
|
| 36 |
+
Parameters: 44540000
|
| 37 |
+
File Size: 178256468
|
| 38 |
+
Architecture:
|
| 39 |
+
- 1x1 Convolution
|
| 40 |
+
- Bottleneck Residual Block
|
| 41 |
+
- Convolution
|
| 42 |
+
- Global Average Pooling
|
| 43 |
+
- Group Normalization
|
| 44 |
+
- Max Pooling
|
| 45 |
+
- ReLU
|
| 46 |
+
- Residual Block
|
| 47 |
+
- Residual Connection
|
| 48 |
+
- Softmax
|
| 49 |
+
- Weight Standardization
|
| 50 |
+
Tasks:
|
| 51 |
+
- Image Classification
|
| 52 |
+
Training Techniques:
|
| 53 |
+
- Mixup
|
| 54 |
+
- SGD with Momentum
|
| 55 |
+
- Weight Decay
|
| 56 |
+
Training Data:
|
| 57 |
+
- ImageNet
|
| 58 |
+
- JFT-300M
|
| 59 |
+
Training Resources: Cloud TPUv3-512
|
| 60 |
+
ID: resnetv2_101x1_bitm
|
| 61 |
+
LR: 0.03
|
| 62 |
+
Epochs: 90
|
| 63 |
+
Layers: 101
|
| 64 |
+
Crop Pct: '1.0'
|
| 65 |
+
Momentum: 0.9
|
| 66 |
+
Batch Size: 4096
|
| 67 |
+
Image Size: '480'
|
| 68 |
+
Weight Decay: 0.0001
|
| 69 |
+
Interpolation: bilinear
|
| 70 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444
|
| 71 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz
|
| 72 |
+
Results:
|
| 73 |
+
- Task: Image Classification
|
| 74 |
+
Dataset: ImageNet
|
| 75 |
+
Metrics:
|
| 76 |
+
Top 1 Accuracy: 82.21%
|
| 77 |
+
Top 5 Accuracy: 96.47%
|
| 78 |
+
- Name: resnetv2_101x3_bitm
|
| 79 |
+
In Collection: Big Transfer
|
| 80 |
+
Metadata:
|
| 81 |
+
FLOPs: 15988688
|
| 82 |
+
Parameters: 387930000
|
| 83 |
+
File Size: 1551830100
|
| 84 |
+
Architecture:
|
| 85 |
+
- 1x1 Convolution
|
| 86 |
+
- Bottleneck Residual Block
|
| 87 |
+
- Convolution
|
| 88 |
+
- Global Average Pooling
|
| 89 |
+
- Group Normalization
|
| 90 |
+
- Max Pooling
|
| 91 |
+
- ReLU
|
| 92 |
+
- Residual Block
|
| 93 |
+
- Residual Connection
|
| 94 |
+
- Softmax
|
| 95 |
+
- Weight Standardization
|
| 96 |
+
Tasks:
|
| 97 |
+
- Image Classification
|
| 98 |
+
Training Techniques:
|
| 99 |
+
- Mixup
|
| 100 |
+
- SGD with Momentum
|
| 101 |
+
- Weight Decay
|
| 102 |
+
Training Data:
|
| 103 |
+
- ImageNet
|
| 104 |
+
- JFT-300M
|
| 105 |
+
Training Resources: Cloud TPUv3-512
|
| 106 |
+
ID: resnetv2_101x3_bitm
|
| 107 |
+
LR: 0.03
|
| 108 |
+
Epochs: 90
|
| 109 |
+
Layers: 101
|
| 110 |
+
Crop Pct: '1.0'
|
| 111 |
+
Momentum: 0.9
|
| 112 |
+
Batch Size: 4096
|
| 113 |
+
Image Size: '480'
|
| 114 |
+
Weight Decay: 0.0001
|
| 115 |
+
Interpolation: bilinear
|
| 116 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451
|
| 117 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz
|
| 118 |
+
Results:
|
| 119 |
+
- Task: Image Classification
|
| 120 |
+
Dataset: ImageNet
|
| 121 |
+
Metrics:
|
| 122 |
+
Top 1 Accuracy: 84.38%
|
| 123 |
+
Top 5 Accuracy: 97.37%
|
| 124 |
+
- Name: resnetv2_152x2_bitm
|
| 125 |
+
In Collection: Big Transfer
|
| 126 |
+
Metadata:
|
| 127 |
+
FLOPs: 10659792
|
| 128 |
+
Parameters: 236340000
|
| 129 |
+
File Size: 945476668
|
| 130 |
+
Architecture:
|
| 131 |
+
- 1x1 Convolution
|
| 132 |
+
- Bottleneck Residual Block
|
| 133 |
+
- Convolution
|
| 134 |
+
- Global Average Pooling
|
| 135 |
+
- Group Normalization
|
| 136 |
+
- Max Pooling
|
| 137 |
+
- ReLU
|
| 138 |
+
- Residual Block
|
| 139 |
+
- Residual Connection
|
| 140 |
+
- Softmax
|
| 141 |
+
- Weight Standardization
|
| 142 |
+
Tasks:
|
| 143 |
+
- Image Classification
|
| 144 |
+
Training Techniques:
|
| 145 |
+
- Mixup
|
| 146 |
+
- SGD with Momentum
|
| 147 |
+
- Weight Decay
|
| 148 |
+
Training Data:
|
| 149 |
+
- ImageNet
|
| 150 |
+
- JFT-300M
|
| 151 |
+
ID: resnetv2_152x2_bitm
|
| 152 |
+
Crop Pct: '1.0'
|
| 153 |
+
Image Size: '480'
|
| 154 |
+
Interpolation: bilinear
|
| 155 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458
|
| 156 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz
|
| 157 |
+
Results:
|
| 158 |
+
- Task: Image Classification
|
| 159 |
+
Dataset: ImageNet
|
| 160 |
+
Metrics:
|
| 161 |
+
Top 1 Accuracy: 84.4%
|
| 162 |
+
Top 5 Accuracy: 97.43%
|
| 163 |
+
- Name: resnetv2_152x4_bitm
|
| 164 |
+
In Collection: Big Transfer
|
| 165 |
+
Metadata:
|
| 166 |
+
FLOPs: 21317584
|
| 167 |
+
Parameters: 936530000
|
| 168 |
+
File Size: 3746270104
|
| 169 |
+
Architecture:
|
| 170 |
+
- 1x1 Convolution
|
| 171 |
+
- Bottleneck Residual Block
|
| 172 |
+
- Convolution
|
| 173 |
+
- Global Average Pooling
|
| 174 |
+
- Group Normalization
|
| 175 |
+
- Max Pooling
|
| 176 |
+
- ReLU
|
| 177 |
+
- Residual Block
|
| 178 |
+
- Residual Connection
|
| 179 |
+
- Softmax
|
| 180 |
+
- Weight Standardization
|
| 181 |
+
Tasks:
|
| 182 |
+
- Image Classification
|
| 183 |
+
Training Techniques:
|
| 184 |
+
- Mixup
|
| 185 |
+
- SGD with Momentum
|
| 186 |
+
- Weight Decay
|
| 187 |
+
Training Data:
|
| 188 |
+
- ImageNet
|
| 189 |
+
- JFT-300M
|
| 190 |
+
Training Resources: Cloud TPUv3-512
|
| 191 |
+
ID: resnetv2_152x4_bitm
|
| 192 |
+
Crop Pct: '1.0'
|
| 193 |
+
Image Size: '480'
|
| 194 |
+
Interpolation: bilinear
|
| 195 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465
|
| 196 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz
|
| 197 |
+
Results:
|
| 198 |
+
- Task: Image Classification
|
| 199 |
+
Dataset: ImageNet
|
| 200 |
+
Metrics:
|
| 201 |
+
Top 1 Accuracy: 84.95%
|
| 202 |
+
Top 5 Accuracy: 97.45%
|
| 203 |
+
- Name: resnetv2_50x1_bitm
|
| 204 |
+
In Collection: Big Transfer
|
| 205 |
+
Metadata:
|
| 206 |
+
FLOPs: 5330896
|
| 207 |
+
Parameters: 25550000
|
| 208 |
+
File Size: 102242668
|
| 209 |
+
Architecture:
|
| 210 |
+
- 1x1 Convolution
|
| 211 |
+
- Bottleneck Residual Block
|
| 212 |
+
- Convolution
|
| 213 |
+
- Global Average Pooling
|
| 214 |
+
- Group Normalization
|
| 215 |
+
- Max Pooling
|
| 216 |
+
- ReLU
|
| 217 |
+
- Residual Block
|
| 218 |
+
- Residual Connection
|
| 219 |
+
- Softmax
|
| 220 |
+
- Weight Standardization
|
| 221 |
+
Tasks:
|
| 222 |
+
- Image Classification
|
| 223 |
+
Training Techniques:
|
| 224 |
+
- Mixup
|
| 225 |
+
- SGD with Momentum
|
| 226 |
+
- Weight Decay
|
| 227 |
+
Training Data:
|
| 228 |
+
- ImageNet
|
| 229 |
+
- JFT-300M
|
| 230 |
+
Training Resources: Cloud TPUv3-512
|
| 231 |
+
ID: resnetv2_50x1_bitm
|
| 232 |
+
LR: 0.03
|
| 233 |
+
Epochs: 90
|
| 234 |
+
Layers: 50
|
| 235 |
+
Crop Pct: '1.0'
|
| 236 |
+
Momentum: 0.9
|
| 237 |
+
Batch Size: 4096
|
| 238 |
+
Image Size: '480'
|
| 239 |
+
Weight Decay: 0.0001
|
| 240 |
+
Interpolation: bilinear
|
| 241 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430
|
| 242 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz
|
| 243 |
+
Results:
|
| 244 |
+
- Task: Image Classification
|
| 245 |
+
Dataset: ImageNet
|
| 246 |
+
Metrics:
|
| 247 |
+
Top 1 Accuracy: 80.19%
|
| 248 |
+
Top 5 Accuracy: 95.63%
|
| 249 |
+
- Name: resnetv2_50x3_bitm
|
| 250 |
+
In Collection: Big Transfer
|
| 251 |
+
Metadata:
|
| 252 |
+
FLOPs: 15988688
|
| 253 |
+
Parameters: 217320000
|
| 254 |
+
File Size: 869321580
|
| 255 |
+
Architecture:
|
| 256 |
+
- 1x1 Convolution
|
| 257 |
+
- Bottleneck Residual Block
|
| 258 |
+
- Convolution
|
| 259 |
+
- Global Average Pooling
|
| 260 |
+
- Group Normalization
|
| 261 |
+
- Max Pooling
|
| 262 |
+
- ReLU
|
| 263 |
+
- Residual Block
|
| 264 |
+
- Residual Connection
|
| 265 |
+
- Softmax
|
| 266 |
+
- Weight Standardization
|
| 267 |
+
Tasks:
|
| 268 |
+
- Image Classification
|
| 269 |
+
Training Techniques:
|
| 270 |
+
- Mixup
|
| 271 |
+
- SGD with Momentum
|
| 272 |
+
- Weight Decay
|
| 273 |
+
Training Data:
|
| 274 |
+
- ImageNet
|
| 275 |
+
- JFT-300M
|
| 276 |
+
Training Resources: Cloud TPUv3-512
|
| 277 |
+
ID: resnetv2_50x3_bitm
|
| 278 |
+
LR: 0.03
|
| 279 |
+
Epochs: 90
|
| 280 |
+
Layers: 50
|
| 281 |
+
Crop Pct: '1.0'
|
| 282 |
+
Momentum: 0.9
|
| 283 |
+
Batch Size: 4096
|
| 284 |
+
Image Size: '480'
|
| 285 |
+
Weight Decay: 0.0001
|
| 286 |
+
Interpolation: bilinear
|
| 287 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437
|
| 288 |
+
Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz
|
| 289 |
+
Results:
|
| 290 |
+
- Task: Image Classification
|
| 291 |
+
Dataset: ImageNet
|
| 292 |
+
Metrics:
|
| 293 |
+
Top 1 Accuracy: 83.75%
|
| 294 |
+
Top 5 Accuracy: 97.12%
|
| 295 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/.templates/models/csp-darknet.md
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CSP-DarkNet
|
| 2 |
+
|
| 3 |
+
**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network.
|
| 4 |
+
|
| 5 |
+
This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4).
|
| 6 |
+
|
| 7 |
+
{% include 'code_snippets.md' %}
|
| 8 |
+
|
| 9 |
+
## How do I train this model?
|
| 10 |
+
|
| 11 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 12 |
+
|
| 13 |
+
## Citation
|
| 14 |
+
|
| 15 |
+
```BibTeX
|
| 16 |
+
@misc{bochkovskiy2020yolov4,
|
| 17 |
+
title={YOLOv4: Optimal Speed and Accuracy of Object Detection},
|
| 18 |
+
author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao},
|
| 19 |
+
year={2020},
|
| 20 |
+
eprint={2004.10934},
|
| 21 |
+
archivePrefix={arXiv},
|
| 22 |
+
primaryClass={cs.CV}
|
| 23 |
+
}
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
<!--
|
| 27 |
+
Type: model-index
|
| 28 |
+
Collections:
|
| 29 |
+
- Name: CSP DarkNet
|
| 30 |
+
Paper:
|
| 31 |
+
Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection'
|
| 32 |
+
URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object
|
| 33 |
+
Models:
|
| 34 |
+
- Name: cspdarknet53
|
| 35 |
+
In Collection: CSP DarkNet
|
| 36 |
+
Metadata:
|
| 37 |
+
FLOPs: 8545018880
|
| 38 |
+
Parameters: 27640000
|
| 39 |
+
File Size: 110775135
|
| 40 |
+
Architecture:
|
| 41 |
+
- 1x1 Convolution
|
| 42 |
+
- Batch Normalization
|
| 43 |
+
- Convolution
|
| 44 |
+
- Global Average Pooling
|
| 45 |
+
- Mish
|
| 46 |
+
- Residual Connection
|
| 47 |
+
- Softmax
|
| 48 |
+
Tasks:
|
| 49 |
+
- Image Classification
|
| 50 |
+
Training Techniques:
|
| 51 |
+
- CutMix
|
| 52 |
+
- Label Smoothing
|
| 53 |
+
- Mosaic
|
| 54 |
+
- Polynomial Learning Rate Decay
|
| 55 |
+
- SGD with Momentum
|
| 56 |
+
- Self-Adversarial Training
|
| 57 |
+
- Weight Decay
|
| 58 |
+
Training Data:
|
| 59 |
+
- ImageNet
|
| 60 |
+
Training Resources: 1x NVIDIA RTX 2070 GPU
|
| 61 |
+
ID: cspdarknet53
|
| 62 |
+
LR: 0.1
|
| 63 |
+
Layers: 53
|
| 64 |
+
Crop Pct: '0.887'
|
| 65 |
+
Momentum: 0.9
|
| 66 |
+
Batch Size: 128
|
| 67 |
+
Image Size: '256'
|
| 68 |
+
Warmup Steps: 1000
|
| 69 |
+
Weight Decay: 0.0005
|
| 70 |
+
Interpolation: bilinear
|
| 71 |
+
Training Steps: 8000000
|
| 72 |
+
FPS (GPU RTX 2070): 66
|
| 73 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441
|
| 74 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
|
| 75 |
+
Results:
|
| 76 |
+
- Task: Image Classification
|
| 77 |
+
Dataset: ImageNet
|
| 78 |
+
Metrics:
|
| 79 |
+
Top 1 Accuracy: 80.05%
|
| 80 |
+
Top 5 Accuracy: 95.09%
|
| 81 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/.templates/models/dpn.md
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dual Path Network (DPN)
|
| 2 |
+
|
| 3 |
+
A **Dual Path Network (DPN)** is a convolutional neural network which presents a new topology of connection paths internally. The intuition is that [ResNets](https://paperswithcode.com/method/resnet) enable feature re-usage while DenseNet enables new feature exploration, and both are important for learning good representations. To enjoy the benefits from both path topologies, Dual Path Networks share common features while maintaining the flexibility to explore new features through dual path architectures.
|
| 4 |
+
|
| 5 |
+
The principal building block is a [DPN Block](https://paperswithcode.com/method/dpn-block).
|
| 6 |
+
|
| 7 |
+
{% include 'code_snippets.md' %}
|
| 8 |
+
|
| 9 |
+
## How do I train this model?
|
| 10 |
+
|
| 11 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 12 |
+
|
| 13 |
+
## Citation
|
| 14 |
+
|
| 15 |
+
```BibTeX
|
| 16 |
+
@misc{chen2017dual,
|
| 17 |
+
title={Dual Path Networks},
|
| 18 |
+
author={Yunpeng Chen and Jianan Li and Huaxin Xiao and Xiaojie Jin and Shuicheng Yan and Jiashi Feng},
|
| 19 |
+
year={2017},
|
| 20 |
+
eprint={1707.01629},
|
| 21 |
+
archivePrefix={arXiv},
|
| 22 |
+
primaryClass={cs.CV}
|
| 23 |
+
}
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
<!--
|
| 27 |
+
Type: model-index
|
| 28 |
+
Collections:
|
| 29 |
+
- Name: DPN
|
| 30 |
+
Paper:
|
| 31 |
+
Title: Dual Path Networks
|
| 32 |
+
URL: https://paperswithcode.com/paper/dual-path-networks
|
| 33 |
+
Models:
|
| 34 |
+
- Name: dpn107
|
| 35 |
+
In Collection: DPN
|
| 36 |
+
Metadata:
|
| 37 |
+
FLOPs: 23524280296
|
| 38 |
+
Parameters: 86920000
|
| 39 |
+
File Size: 348612331
|
| 40 |
+
Architecture:
|
| 41 |
+
- Batch Normalization
|
| 42 |
+
- Convolution
|
| 43 |
+
- DPN Block
|
| 44 |
+
- Dense Connections
|
| 45 |
+
- Global Average Pooling
|
| 46 |
+
- Max Pooling
|
| 47 |
+
- Softmax
|
| 48 |
+
Tasks:
|
| 49 |
+
- Image Classification
|
| 50 |
+
Training Techniques:
|
| 51 |
+
- SGD with Momentum
|
| 52 |
+
- Weight Decay
|
| 53 |
+
Training Data:
|
| 54 |
+
- ImageNet
|
| 55 |
+
Training Resources: 40x K80 GPUs
|
| 56 |
+
ID: dpn107
|
| 57 |
+
LR: 0.316
|
| 58 |
+
Layers: 107
|
| 59 |
+
Crop Pct: '0.875'
|
| 60 |
+
Batch Size: 1280
|
| 61 |
+
Image Size: '224'
|
| 62 |
+
Interpolation: bicubic
|
| 63 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L310
|
| 64 |
+
Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth
|
| 65 |
+
Results:
|
| 66 |
+
- Task: Image Classification
|
| 67 |
+
Dataset: ImageNet
|
| 68 |
+
Metrics:
|
| 69 |
+
Top 1 Accuracy: 80.16%
|
| 70 |
+
Top 5 Accuracy: 94.91%
|
| 71 |
+
- Name: dpn131
|
| 72 |
+
In Collection: DPN
|
| 73 |
+
Metadata:
|
| 74 |
+
FLOPs: 20586274792
|
| 75 |
+
Parameters: 79250000
|
| 76 |
+
File Size: 318016207
|
| 77 |
+
Architecture:
|
| 78 |
+
- Batch Normalization
|
| 79 |
+
- Convolution
|
| 80 |
+
- DPN Block
|
| 81 |
+
- Dense Connections
|
| 82 |
+
- Global Average Pooling
|
| 83 |
+
- Max Pooling
|
| 84 |
+
- Softmax
|
| 85 |
+
Tasks:
|
| 86 |
+
- Image Classification
|
| 87 |
+
Training Techniques:
|
| 88 |
+
- SGD with Momentum
|
| 89 |
+
- Weight Decay
|
| 90 |
+
Training Data:
|
| 91 |
+
- ImageNet
|
| 92 |
+
Training Resources: 40x K80 GPUs
|
| 93 |
+
ID: dpn131
|
| 94 |
+
LR: 0.316
|
| 95 |
+
Layers: 131
|
| 96 |
+
Crop Pct: '0.875'
|
| 97 |
+
Batch Size: 960
|
| 98 |
+
Image Size: '224'
|
| 99 |
+
Interpolation: bicubic
|
| 100 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L302
|
| 101 |
+
Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth
|
| 102 |
+
Results:
|
| 103 |
+
- Task: Image Classification
|
| 104 |
+
Dataset: ImageNet
|
| 105 |
+
Metrics:
|
| 106 |
+
Top 1 Accuracy: 79.83%
|
| 107 |
+
Top 5 Accuracy: 94.71%
|
| 108 |
+
- Name: dpn68
|
| 109 |
+
In Collection: DPN
|
| 110 |
+
Metadata:
|
| 111 |
+
FLOPs: 2990567880
|
| 112 |
+
Parameters: 12610000
|
| 113 |
+
File Size: 50761994
|
| 114 |
+
Architecture:
|
| 115 |
+
- Batch Normalization
|
| 116 |
+
- Convolution
|
| 117 |
+
- DPN Block
|
| 118 |
+
- Dense Connections
|
| 119 |
+
- Global Average Pooling
|
| 120 |
+
- Max Pooling
|
| 121 |
+
- Softmax
|
| 122 |
+
Tasks:
|
| 123 |
+
- Image Classification
|
| 124 |
+
Training Techniques:
|
| 125 |
+
- SGD with Momentum
|
| 126 |
+
- Weight Decay
|
| 127 |
+
Training Data:
|
| 128 |
+
- ImageNet
|
| 129 |
+
Training Resources: 40x K80 GPUs
|
| 130 |
+
ID: dpn68
|
| 131 |
+
LR: 0.316
|
| 132 |
+
Layers: 68
|
| 133 |
+
Crop Pct: '0.875'
|
| 134 |
+
Batch Size: 1280
|
| 135 |
+
Image Size: '224'
|
| 136 |
+
Interpolation: bicubic
|
| 137 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L270
|
| 138 |
+
Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth
|
| 139 |
+
Results:
|
| 140 |
+
- Task: Image Classification
|
| 141 |
+
Dataset: ImageNet
|
| 142 |
+
Metrics:
|
| 143 |
+
Top 1 Accuracy: 76.31%
|
| 144 |
+
Top 5 Accuracy: 92.97%
|
| 145 |
+
- Name: dpn68b
|
| 146 |
+
In Collection: DPN
|
| 147 |
+
Metadata:
|
| 148 |
+
FLOPs: 2990567880
|
| 149 |
+
Parameters: 12610000
|
| 150 |
+
File Size: 50781025
|
| 151 |
+
Architecture:
|
| 152 |
+
- Batch Normalization
|
| 153 |
+
- Convolution
|
| 154 |
+
- DPN Block
|
| 155 |
+
- Dense Connections
|
| 156 |
+
- Global Average Pooling
|
| 157 |
+
- Max Pooling
|
| 158 |
+
- Softmax
|
| 159 |
+
Tasks:
|
| 160 |
+
- Image Classification
|
| 161 |
+
Training Techniques:
|
| 162 |
+
- SGD with Momentum
|
| 163 |
+
- Weight Decay
|
| 164 |
+
Training Data:
|
| 165 |
+
- ImageNet
|
| 166 |
+
Training Resources: 40x K80 GPUs
|
| 167 |
+
ID: dpn68b
|
| 168 |
+
LR: 0.316
|
| 169 |
+
Layers: 68
|
| 170 |
+
Crop Pct: '0.875'
|
| 171 |
+
Batch Size: 1280
|
| 172 |
+
Image Size: '224'
|
| 173 |
+
Interpolation: bicubic
|
| 174 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L278
|
| 175 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth
|
| 176 |
+
Results:
|
| 177 |
+
- Task: Image Classification
|
| 178 |
+
Dataset: ImageNet
|
| 179 |
+
Metrics:
|
| 180 |
+
Top 1 Accuracy: 79.21%
|
| 181 |
+
Top 5 Accuracy: 94.42%
|
| 182 |
+
- Name: dpn92
|
| 183 |
+
In Collection: DPN
|
| 184 |
+
Metadata:
|
| 185 |
+
FLOPs: 8357659624
|
| 186 |
+
Parameters: 37670000
|
| 187 |
+
File Size: 151248422
|
| 188 |
+
Architecture:
|
| 189 |
+
- Batch Normalization
|
| 190 |
+
- Convolution
|
| 191 |
+
- DPN Block
|
| 192 |
+
- Dense Connections
|
| 193 |
+
- Global Average Pooling
|
| 194 |
+
- Max Pooling
|
| 195 |
+
- Softmax
|
| 196 |
+
Tasks:
|
| 197 |
+
- Image Classification
|
| 198 |
+
Training Techniques:
|
| 199 |
+
- SGD with Momentum
|
| 200 |
+
- Weight Decay
|
| 201 |
+
Training Data:
|
| 202 |
+
- ImageNet
|
| 203 |
+
Training Resources: 40x K80 GPUs
|
| 204 |
+
ID: dpn92
|
| 205 |
+
LR: 0.316
|
| 206 |
+
Layers: 92
|
| 207 |
+
Crop Pct: '0.875'
|
| 208 |
+
Batch Size: 1280
|
| 209 |
+
Image Size: '224'
|
| 210 |
+
Interpolation: bicubic
|
| 211 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L286
|
| 212 |
+
Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth
|
| 213 |
+
Results:
|
| 214 |
+
- Task: Image Classification
|
| 215 |
+
Dataset: ImageNet
|
| 216 |
+
Metrics:
|
| 217 |
+
Top 1 Accuracy: 79.99%
|
| 218 |
+
Top 5 Accuracy: 94.84%
|
| 219 |
+
- Name: dpn98
|
| 220 |
+
In Collection: DPN
|
| 221 |
+
Metadata:
|
| 222 |
+
FLOPs: 15003675112
|
| 223 |
+
Parameters: 61570000
|
| 224 |
+
File Size: 247021307
|
| 225 |
+
Architecture:
|
| 226 |
+
- Batch Normalization
|
| 227 |
+
- Convolution
|
| 228 |
+
- DPN Block
|
| 229 |
+
- Dense Connections
|
| 230 |
+
- Global Average Pooling
|
| 231 |
+
- Max Pooling
|
| 232 |
+
- Softmax
|
| 233 |
+
Tasks:
|
| 234 |
+
- Image Classification
|
| 235 |
+
Training Techniques:
|
| 236 |
+
- SGD with Momentum
|
| 237 |
+
- Weight Decay
|
| 238 |
+
Training Data:
|
| 239 |
+
- ImageNet
|
| 240 |
+
Training Resources: 40x K80 GPUs
|
| 241 |
+
ID: dpn98
|
| 242 |
+
LR: 0.4
|
| 243 |
+
Layers: 98
|
| 244 |
+
Crop Pct: '0.875'
|
| 245 |
+
Batch Size: 1280
|
| 246 |
+
Image Size: '224'
|
| 247 |
+
Interpolation: bicubic
|
| 248 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dpn.py#L294
|
| 249 |
+
Weights: https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth
|
| 250 |
+
Results:
|
| 251 |
+
- Task: Image Classification
|
| 252 |
+
Dataset: ImageNet
|
| 253 |
+
Metrics:
|
| 254 |
+
Top 1 Accuracy: 79.65%
|
| 255 |
+
Top 5 Accuracy: 94.61%
|
| 256 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/gloun-senet.md
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# (Gluon) SENet
|
| 2 |
+
|
| 3 |
+
A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
|
| 6 |
+
|
| 7 |
+
## How do I use this model on an image?
|
| 8 |
+
To load a pretrained model:
|
| 9 |
+
|
| 10 |
+
```python
|
| 11 |
+
import timm
|
| 12 |
+
model = timm.create_model('gluon_senet154', pretrained=True)
|
| 13 |
+
model.eval()
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
To load and preprocess the image:
|
| 17 |
+
```python
|
| 18 |
+
import urllib
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from timm.data import resolve_data_config
|
| 21 |
+
from timm.data.transforms_factory import create_transform
|
| 22 |
+
|
| 23 |
+
config = resolve_data_config({}, model=model)
|
| 24 |
+
transform = create_transform(**config)
|
| 25 |
+
|
| 26 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 27 |
+
urllib.request.urlretrieve(url, filename)
|
| 28 |
+
img = Image.open(filename).convert('RGB')
|
| 29 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
To get the model predictions:
|
| 33 |
+
```python
|
| 34 |
+
import torch
|
| 35 |
+
with torch.no_grad():
|
| 36 |
+
out = model(tensor)
|
| 37 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 38 |
+
print(probabilities.shape)
|
| 39 |
+
# prints: torch.Size([1000])
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
To get the top-5 predictions class names:
|
| 43 |
+
```python
|
| 44 |
+
# Get imagenet class mappings
|
| 45 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 46 |
+
urllib.request.urlretrieve(url, filename)
|
| 47 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 48 |
+
categories = [s.strip() for s in f.readlines()]
|
| 49 |
+
|
| 50 |
+
# Print top categories per image
|
| 51 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 52 |
+
for i in range(top5_prob.size(0)):
|
| 53 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 54 |
+
# prints class names and probabilities like:
|
| 55 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
Replace the model name with the variant you want to use, e.g. `gluon_senet154`. You can find the IDs in the model summaries at the top of this page.
|
| 59 |
+
|
| 60 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 61 |
+
|
| 62 |
+
## How do I finetune this model?
|
| 63 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 64 |
+
```python
|
| 65 |
+
model = timm.create_model('gluon_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 66 |
+
```
|
| 67 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 68 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 69 |
+
|
| 70 |
+
## How do I train this model?
|
| 71 |
+
|
| 72 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 73 |
+
|
| 74 |
+
## Citation
|
| 75 |
+
|
| 76 |
+
```BibTeX
|
| 77 |
+
@misc{hu2019squeezeandexcitation,
|
| 78 |
+
title={Squeeze-and-Excitation Networks},
|
| 79 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 80 |
+
year={2019},
|
| 81 |
+
eprint={1709.01507},
|
| 82 |
+
archivePrefix={arXiv},
|
| 83 |
+
primaryClass={cs.CV}
|
| 84 |
+
}
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
<!--
|
| 88 |
+
Type: model-index
|
| 89 |
+
Collections:
|
| 90 |
+
- Name: Gloun SENet
|
| 91 |
+
Paper:
|
| 92 |
+
Title: Squeeze-and-Excitation Networks
|
| 93 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 94 |
+
Models:
|
| 95 |
+
- Name: gluon_senet154
|
| 96 |
+
In Collection: Gloun SENet
|
| 97 |
+
Metadata:
|
| 98 |
+
FLOPs: 26681705136
|
| 99 |
+
Parameters: 115090000
|
| 100 |
+
File Size: 461546622
|
| 101 |
+
Architecture:
|
| 102 |
+
- Convolution
|
| 103 |
+
- Dense Connections
|
| 104 |
+
- Global Average Pooling
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- Softmax
|
| 107 |
+
- Squeeze-and-Excitation Block
|
| 108 |
+
Tasks:
|
| 109 |
+
- Image Classification
|
| 110 |
+
Training Data:
|
| 111 |
+
- ImageNet
|
| 112 |
+
ID: gluon_senet154
|
| 113 |
+
Crop Pct: '0.875'
|
| 114 |
+
Image Size: '224'
|
| 115 |
+
Interpolation: bicubic
|
| 116 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L239
|
| 117 |
+
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth
|
| 118 |
+
Results:
|
| 119 |
+
- Task: Image Classification
|
| 120 |
+
Dataset: ImageNet
|
| 121 |
+
Metrics:
|
| 122 |
+
Top 1 Accuracy: 81.23%
|
| 123 |
+
Top 5 Accuracy: 95.35%
|
| 124 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/gloun-seresnext.md
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# (Gluon) SE-ResNeXt
|
| 2 |
+
|
| 3 |
+
**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
|
| 6 |
+
|
| 7 |
+
## How do I use this model on an image?
|
| 8 |
+
To load a pretrained model:
|
| 9 |
+
|
| 10 |
+
```python
|
| 11 |
+
import timm
|
| 12 |
+
model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True)
|
| 13 |
+
model.eval()
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
To load and preprocess the image:
|
| 17 |
+
```python
|
| 18 |
+
import urllib
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from timm.data import resolve_data_config
|
| 21 |
+
from timm.data.transforms_factory import create_transform
|
| 22 |
+
|
| 23 |
+
config = resolve_data_config({}, model=model)
|
| 24 |
+
transform = create_transform(**config)
|
| 25 |
+
|
| 26 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 27 |
+
urllib.request.urlretrieve(url, filename)
|
| 28 |
+
img = Image.open(filename).convert('RGB')
|
| 29 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
To get the model predictions:
|
| 33 |
+
```python
|
| 34 |
+
import torch
|
| 35 |
+
with torch.no_grad():
|
| 36 |
+
out = model(tensor)
|
| 37 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 38 |
+
print(probabilities.shape)
|
| 39 |
+
# prints: torch.Size([1000])
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
To get the top-5 predictions class names:
|
| 43 |
+
```python
|
| 44 |
+
# Get imagenet class mappings
|
| 45 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 46 |
+
urllib.request.urlretrieve(url, filename)
|
| 47 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 48 |
+
categories = [s.strip() for s in f.readlines()]
|
| 49 |
+
|
| 50 |
+
# Print top categories per image
|
| 51 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 52 |
+
for i in range(top5_prob.size(0)):
|
| 53 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 54 |
+
# prints class names and probabilities like:
|
| 55 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
Replace the model name with the variant you want to use, e.g. `gluon_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
|
| 59 |
+
|
| 60 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 61 |
+
|
| 62 |
+
## How do I finetune this model?
|
| 63 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 64 |
+
```python
|
| 65 |
+
model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 66 |
+
```
|
| 67 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 68 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 69 |
+
|
| 70 |
+
## How do I train this model?
|
| 71 |
+
|
| 72 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 73 |
+
|
| 74 |
+
## Citation
|
| 75 |
+
|
| 76 |
+
```BibTeX
|
| 77 |
+
@misc{hu2019squeezeandexcitation,
|
| 78 |
+
title={Squeeze-and-Excitation Networks},
|
| 79 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 80 |
+
year={2019},
|
| 81 |
+
eprint={1709.01507},
|
| 82 |
+
archivePrefix={arXiv},
|
| 83 |
+
primaryClass={cs.CV}
|
| 84 |
+
}
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
<!--
|
| 88 |
+
Type: model-index
|
| 89 |
+
Collections:
|
| 90 |
+
- Name: Gloun SEResNeXt
|
| 91 |
+
Paper:
|
| 92 |
+
Title: Squeeze-and-Excitation Networks
|
| 93 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 94 |
+
Models:
|
| 95 |
+
- Name: gluon_seresnext101_32x4d
|
| 96 |
+
In Collection: Gloun SEResNeXt
|
| 97 |
+
Metadata:
|
| 98 |
+
FLOPs: 10302923504
|
| 99 |
+
Parameters: 48960000
|
| 100 |
+
File Size: 196505510
|
| 101 |
+
Architecture:
|
| 102 |
+
- 1x1 Convolution
|
| 103 |
+
- Batch Normalization
|
| 104 |
+
- Convolution
|
| 105 |
+
- Global Average Pooling
|
| 106 |
+
- Grouped Convolution
|
| 107 |
+
- Max Pooling
|
| 108 |
+
- ReLU
|
| 109 |
+
- ResNeXt Block
|
| 110 |
+
- Residual Connection
|
| 111 |
+
- Softmax
|
| 112 |
+
- Squeeze-and-Excitation Block
|
| 113 |
+
Tasks:
|
| 114 |
+
- Image Classification
|
| 115 |
+
Training Data:
|
| 116 |
+
- ImageNet
|
| 117 |
+
ID: gluon_seresnext101_32x4d
|
| 118 |
+
Crop Pct: '0.875'
|
| 119 |
+
Image Size: '224'
|
| 120 |
+
Interpolation: bicubic
|
| 121 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L219
|
| 122 |
+
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth
|
| 123 |
+
Results:
|
| 124 |
+
- Task: Image Classification
|
| 125 |
+
Dataset: ImageNet
|
| 126 |
+
Metrics:
|
| 127 |
+
Top 1 Accuracy: 80.87%
|
| 128 |
+
Top 5 Accuracy: 95.29%
|
| 129 |
+
- Name: gluon_seresnext101_64x4d
|
| 130 |
+
In Collection: Gloun SEResNeXt
|
| 131 |
+
Metadata:
|
| 132 |
+
FLOPs: 19958950640
|
| 133 |
+
Parameters: 88230000
|
| 134 |
+
File Size: 353875948
|
| 135 |
+
Architecture:
|
| 136 |
+
- 1x1 Convolution
|
| 137 |
+
- Batch Normalization
|
| 138 |
+
- Convolution
|
| 139 |
+
- Global Average Pooling
|
| 140 |
+
- Grouped Convolution
|
| 141 |
+
- Max Pooling
|
| 142 |
+
- ReLU
|
| 143 |
+
- ResNeXt Block
|
| 144 |
+
- Residual Connection
|
| 145 |
+
- Softmax
|
| 146 |
+
- Squeeze-and-Excitation Block
|
| 147 |
+
Tasks:
|
| 148 |
+
- Image Classification
|
| 149 |
+
Training Data:
|
| 150 |
+
- ImageNet
|
| 151 |
+
ID: gluon_seresnext101_64x4d
|
| 152 |
+
Crop Pct: '0.875'
|
| 153 |
+
Image Size: '224'
|
| 154 |
+
Interpolation: bicubic
|
| 155 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L229
|
| 156 |
+
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth
|
| 157 |
+
Results:
|
| 158 |
+
- Task: Image Classification
|
| 159 |
+
Dataset: ImageNet
|
| 160 |
+
Metrics:
|
| 161 |
+
Top 1 Accuracy: 80.88%
|
| 162 |
+
Top 5 Accuracy: 95.31%
|
| 163 |
+
- Name: gluon_seresnext50_32x4d
|
| 164 |
+
In Collection: Gloun SEResNeXt
|
| 165 |
+
Metadata:
|
| 166 |
+
FLOPs: 5475179184
|
| 167 |
+
Parameters: 27560000
|
| 168 |
+
File Size: 110578827
|
| 169 |
+
Architecture:
|
| 170 |
+
- 1x1 Convolution
|
| 171 |
+
- Batch Normalization
|
| 172 |
+
- Convolution
|
| 173 |
+
- Global Average Pooling
|
| 174 |
+
- Grouped Convolution
|
| 175 |
+
- Max Pooling
|
| 176 |
+
- ReLU
|
| 177 |
+
- ResNeXt Block
|
| 178 |
+
- Residual Connection
|
| 179 |
+
- Softmax
|
| 180 |
+
- Squeeze-and-Excitation Block
|
| 181 |
+
Tasks:
|
| 182 |
+
- Image Classification
|
| 183 |
+
Training Data:
|
| 184 |
+
- ImageNet
|
| 185 |
+
ID: gluon_seresnext50_32x4d
|
| 186 |
+
Crop Pct: '0.875'
|
| 187 |
+
Image Size: '224'
|
| 188 |
+
Interpolation: bicubic
|
| 189 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L209
|
| 190 |
+
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth
|
| 191 |
+
Results:
|
| 192 |
+
- Task: Image Classification
|
| 193 |
+
Dataset: ImageNet
|
| 194 |
+
Metrics:
|
| 195 |
+
Top 1 Accuracy: 79.92%
|
| 196 |
+
Top 5 Accuracy: 94.82%
|
| 197 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/inception-resnet-v2.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Inception ResNet v2
|
| 2 |
+
|
| 3 |
+
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('inception_resnet_v2', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{szegedy2016inceptionv4,
|
| 76 |
+
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
|
| 77 |
+
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
|
| 78 |
+
year={2016},
|
| 79 |
+
eprint={1602.07261},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: Inception ResNet v2
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
|
| 91 |
+
Learning
|
| 92 |
+
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
|
| 93 |
+
Models:
|
| 94 |
+
- Name: inception_resnet_v2
|
| 95 |
+
In Collection: Inception ResNet v2
|
| 96 |
+
Metadata:
|
| 97 |
+
FLOPs: 16959133120
|
| 98 |
+
Parameters: 55850000
|
| 99 |
+
File Size: 223774238
|
| 100 |
+
Architecture:
|
| 101 |
+
- Average Pooling
|
| 102 |
+
- Dropout
|
| 103 |
+
- Inception-ResNet-v2 Reduction-B
|
| 104 |
+
- Inception-ResNet-v2-A
|
| 105 |
+
- Inception-ResNet-v2-B
|
| 106 |
+
- Inception-ResNet-v2-C
|
| 107 |
+
- Reduction-A
|
| 108 |
+
- Softmax
|
| 109 |
+
Tasks:
|
| 110 |
+
- Image Classification
|
| 111 |
+
Training Techniques:
|
| 112 |
+
- Label Smoothing
|
| 113 |
+
- RMSProp
|
| 114 |
+
- Weight Decay
|
| 115 |
+
Training Data:
|
| 116 |
+
- ImageNet
|
| 117 |
+
Training Resources: 20x NVIDIA Kepler GPUs
|
| 118 |
+
ID: inception_resnet_v2
|
| 119 |
+
LR: 0.045
|
| 120 |
+
Dropout: 0.2
|
| 121 |
+
Crop Pct: '0.897'
|
| 122 |
+
Momentum: 0.9
|
| 123 |
+
Image Size: '299'
|
| 124 |
+
Interpolation: bicubic
|
| 125 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
|
| 126 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
|
| 127 |
+
Results:
|
| 128 |
+
- Task: Image Classification
|
| 129 |
+
Dataset: ImageNet
|
| 130 |
+
Metrics:
|
| 131 |
+
Top 1 Accuracy: 80.46%
|
| 132 |
+
Top 5 Accuracy: 95.31%
|
| 133 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/inception-v3.md
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Inception v3
|
| 2 |
+
|
| 3 |
+
**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('inception_v3', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `inception_v3`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{DBLP:journals/corr/SzegedyVISW15,
|
| 76 |
+
author = {Christian Szegedy and
|
| 77 |
+
Vincent Vanhoucke and
|
| 78 |
+
Sergey Ioffe and
|
| 79 |
+
Jonathon Shlens and
|
| 80 |
+
Zbigniew Wojna},
|
| 81 |
+
title = {Rethinking the Inception Architecture for Computer Vision},
|
| 82 |
+
journal = {CoRR},
|
| 83 |
+
volume = {abs/1512.00567},
|
| 84 |
+
year = {2015},
|
| 85 |
+
url = {http://arxiv.org/abs/1512.00567},
|
| 86 |
+
archivePrefix = {arXiv},
|
| 87 |
+
eprint = {1512.00567},
|
| 88 |
+
timestamp = {Mon, 13 Aug 2018 16:49:07 +0200},
|
| 89 |
+
biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib},
|
| 90 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 91 |
+
}
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
<!--
|
| 95 |
+
Type: model-index
|
| 96 |
+
Collections:
|
| 97 |
+
- Name: Inception v3
|
| 98 |
+
Paper:
|
| 99 |
+
Title: Rethinking the Inception Architecture for Computer Vision
|
| 100 |
+
URL: https://paperswithcode.com/paper/rethinking-the-inception-architecture-for
|
| 101 |
+
Models:
|
| 102 |
+
- Name: inception_v3
|
| 103 |
+
In Collection: Inception v3
|
| 104 |
+
Metadata:
|
| 105 |
+
FLOPs: 7352418880
|
| 106 |
+
Parameters: 23830000
|
| 107 |
+
File Size: 108857766
|
| 108 |
+
Architecture:
|
| 109 |
+
- 1x1 Convolution
|
| 110 |
+
- Auxiliary Classifier
|
| 111 |
+
- Average Pooling
|
| 112 |
+
- Average Pooling
|
| 113 |
+
- Batch Normalization
|
| 114 |
+
- Convolution
|
| 115 |
+
- Dense Connections
|
| 116 |
+
- Dropout
|
| 117 |
+
- Inception-v3 Module
|
| 118 |
+
- Max Pooling
|
| 119 |
+
- ReLU
|
| 120 |
+
- Softmax
|
| 121 |
+
Tasks:
|
| 122 |
+
- Image Classification
|
| 123 |
+
Training Techniques:
|
| 124 |
+
- Gradient Clipping
|
| 125 |
+
- Label Smoothing
|
| 126 |
+
- RMSProp
|
| 127 |
+
- Weight Decay
|
| 128 |
+
Training Data:
|
| 129 |
+
- ImageNet
|
| 130 |
+
Training Resources: 50x NVIDIA Kepler GPUs
|
| 131 |
+
ID: inception_v3
|
| 132 |
+
LR: 0.045
|
| 133 |
+
Dropout: 0.2
|
| 134 |
+
Crop Pct: '0.875'
|
| 135 |
+
Momentum: 0.9
|
| 136 |
+
Image Size: '299'
|
| 137 |
+
Interpolation: bicubic
|
| 138 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L442
|
| 139 |
+
Weights: https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth
|
| 140 |
+
Results:
|
| 141 |
+
- Task: Image Classification
|
| 142 |
+
Dataset: ImageNet
|
| 143 |
+
Metrics:
|
| 144 |
+
Top 1 Accuracy: 77.46%
|
| 145 |
+
Top 5 Accuracy: 93.48%
|
| 146 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/legacy-se-resnet.md
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# (Legacy) SE-ResNet
|
| 2 |
+
|
| 3 |
+
**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('legacy_seresnet101', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `legacy_seresnet101`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('legacy_seresnet101', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{hu2019squeezeandexcitation,
|
| 76 |
+
title={Squeeze-and-Excitation Networks},
|
| 77 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1709.01507},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: Legacy SE ResNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Squeeze-and-Excitation Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: legacy_seresnet101
|
| 94 |
+
In Collection: Legacy SE ResNet
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 9762614000
|
| 97 |
+
Parameters: 49330000
|
| 98 |
+
File Size: 197822624
|
| 99 |
+
Architecture:
|
| 100 |
+
- 1x1 Convolution
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Bottleneck Residual Block
|
| 103 |
+
- Convolution
|
| 104 |
+
- Global Average Pooling
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- ReLU
|
| 107 |
+
- Residual Block
|
| 108 |
+
- Residual Connection
|
| 109 |
+
- Softmax
|
| 110 |
+
- Squeeze-and-Excitation Block
|
| 111 |
+
Tasks:
|
| 112 |
+
- Image Classification
|
| 113 |
+
Training Techniques:
|
| 114 |
+
- Label Smoothing
|
| 115 |
+
- SGD with Momentum
|
| 116 |
+
- Weight Decay
|
| 117 |
+
Training Data:
|
| 118 |
+
- ImageNet
|
| 119 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 120 |
+
ID: legacy_seresnet101
|
| 121 |
+
LR: 0.6
|
| 122 |
+
Epochs: 100
|
| 123 |
+
Layers: 101
|
| 124 |
+
Dropout: 0.2
|
| 125 |
+
Crop Pct: '0.875'
|
| 126 |
+
Momentum: 0.9
|
| 127 |
+
Batch Size: 1024
|
| 128 |
+
Image Size: '224'
|
| 129 |
+
Interpolation: bilinear
|
| 130 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L426
|
| 131 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth
|
| 132 |
+
Results:
|
| 133 |
+
- Task: Image Classification
|
| 134 |
+
Dataset: ImageNet
|
| 135 |
+
Metrics:
|
| 136 |
+
Top 1 Accuracy: 78.38%
|
| 137 |
+
Top 5 Accuracy: 94.26%
|
| 138 |
+
- Name: legacy_seresnet152
|
| 139 |
+
In Collection: Legacy SE ResNet
|
| 140 |
+
Metadata:
|
| 141 |
+
FLOPs: 14553578160
|
| 142 |
+
Parameters: 66819999
|
| 143 |
+
File Size: 268033864
|
| 144 |
+
Architecture:
|
| 145 |
+
- 1x1 Convolution
|
| 146 |
+
- Batch Normalization
|
| 147 |
+
- Bottleneck Residual Block
|
| 148 |
+
- Convolution
|
| 149 |
+
- Global Average Pooling
|
| 150 |
+
- Max Pooling
|
| 151 |
+
- ReLU
|
| 152 |
+
- Residual Block
|
| 153 |
+
- Residual Connection
|
| 154 |
+
- Softmax
|
| 155 |
+
- Squeeze-and-Excitation Block
|
| 156 |
+
Tasks:
|
| 157 |
+
- Image Classification
|
| 158 |
+
Training Techniques:
|
| 159 |
+
- Label Smoothing
|
| 160 |
+
- SGD with Momentum
|
| 161 |
+
- Weight Decay
|
| 162 |
+
Training Data:
|
| 163 |
+
- ImageNet
|
| 164 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 165 |
+
ID: legacy_seresnet152
|
| 166 |
+
LR: 0.6
|
| 167 |
+
Epochs: 100
|
| 168 |
+
Layers: 152
|
| 169 |
+
Dropout: 0.2
|
| 170 |
+
Crop Pct: '0.875'
|
| 171 |
+
Momentum: 0.9
|
| 172 |
+
Batch Size: 1024
|
| 173 |
+
Image Size: '224'
|
| 174 |
+
Interpolation: bilinear
|
| 175 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L433
|
| 176 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth
|
| 177 |
+
Results:
|
| 178 |
+
- Task: Image Classification
|
| 179 |
+
Dataset: ImageNet
|
| 180 |
+
Metrics:
|
| 181 |
+
Top 1 Accuracy: 78.67%
|
| 182 |
+
Top 5 Accuracy: 94.38%
|
| 183 |
+
- Name: legacy_seresnet18
|
| 184 |
+
In Collection: Legacy SE ResNet
|
| 185 |
+
Metadata:
|
| 186 |
+
FLOPs: 2328876024
|
| 187 |
+
Parameters: 11780000
|
| 188 |
+
File Size: 47175663
|
| 189 |
+
Architecture:
|
| 190 |
+
- 1x1 Convolution
|
| 191 |
+
- Batch Normalization
|
| 192 |
+
- Bottleneck Residual Block
|
| 193 |
+
- Convolution
|
| 194 |
+
- Global Average Pooling
|
| 195 |
+
- Max Pooling
|
| 196 |
+
- ReLU
|
| 197 |
+
- Residual Block
|
| 198 |
+
- Residual Connection
|
| 199 |
+
- Softmax
|
| 200 |
+
- Squeeze-and-Excitation Block
|
| 201 |
+
Tasks:
|
| 202 |
+
- Image Classification
|
| 203 |
+
Training Techniques:
|
| 204 |
+
- Label Smoothing
|
| 205 |
+
- SGD with Momentum
|
| 206 |
+
- Weight Decay
|
| 207 |
+
Training Data:
|
| 208 |
+
- ImageNet
|
| 209 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 210 |
+
ID: legacy_seresnet18
|
| 211 |
+
LR: 0.6
|
| 212 |
+
Epochs: 100
|
| 213 |
+
Layers: 18
|
| 214 |
+
Dropout: 0.2
|
| 215 |
+
Crop Pct: '0.875'
|
| 216 |
+
Momentum: 0.9
|
| 217 |
+
Batch Size: 1024
|
| 218 |
+
Image Size: '224'
|
| 219 |
+
Interpolation: bicubic
|
| 220 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L405
|
| 221 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth
|
| 222 |
+
Results:
|
| 223 |
+
- Task: Image Classification
|
| 224 |
+
Dataset: ImageNet
|
| 225 |
+
Metrics:
|
| 226 |
+
Top 1 Accuracy: 71.74%
|
| 227 |
+
Top 5 Accuracy: 90.34%
|
| 228 |
+
- Name: legacy_seresnet34
|
| 229 |
+
In Collection: Legacy SE ResNet
|
| 230 |
+
Metadata:
|
| 231 |
+
FLOPs: 4706201004
|
| 232 |
+
Parameters: 21960000
|
| 233 |
+
File Size: 87958697
|
| 234 |
+
Architecture:
|
| 235 |
+
- 1x1 Convolution
|
| 236 |
+
- Batch Normalization
|
| 237 |
+
- Bottleneck Residual Block
|
| 238 |
+
- Convolution
|
| 239 |
+
- Global Average Pooling
|
| 240 |
+
- Max Pooling
|
| 241 |
+
- ReLU
|
| 242 |
+
- Residual Block
|
| 243 |
+
- Residual Connection
|
| 244 |
+
- Softmax
|
| 245 |
+
- Squeeze-and-Excitation Block
|
| 246 |
+
Tasks:
|
| 247 |
+
- Image Classification
|
| 248 |
+
Training Techniques:
|
| 249 |
+
- Label Smoothing
|
| 250 |
+
- SGD with Momentum
|
| 251 |
+
- Weight Decay
|
| 252 |
+
Training Data:
|
| 253 |
+
- ImageNet
|
| 254 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 255 |
+
ID: legacy_seresnet34
|
| 256 |
+
LR: 0.6
|
| 257 |
+
Epochs: 100
|
| 258 |
+
Layers: 34
|
| 259 |
+
Dropout: 0.2
|
| 260 |
+
Crop Pct: '0.875'
|
| 261 |
+
Momentum: 0.9
|
| 262 |
+
Batch Size: 1024
|
| 263 |
+
Image Size: '224'
|
| 264 |
+
Interpolation: bilinear
|
| 265 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L412
|
| 266 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth
|
| 267 |
+
Results:
|
| 268 |
+
- Task: Image Classification
|
| 269 |
+
Dataset: ImageNet
|
| 270 |
+
Metrics:
|
| 271 |
+
Top 1 Accuracy: 74.79%
|
| 272 |
+
Top 5 Accuracy: 92.13%
|
| 273 |
+
- Name: legacy_seresnet50
|
| 274 |
+
In Collection: Legacy SE ResNet
|
| 275 |
+
Metadata:
|
| 276 |
+
FLOPs: 4974351024
|
| 277 |
+
Parameters: 28090000
|
| 278 |
+
File Size: 112611220
|
| 279 |
+
Architecture:
|
| 280 |
+
- 1x1 Convolution
|
| 281 |
+
- Batch Normalization
|
| 282 |
+
- Bottleneck Residual Block
|
| 283 |
+
- Convolution
|
| 284 |
+
- Global Average Pooling
|
| 285 |
+
- Max Pooling
|
| 286 |
+
- ReLU
|
| 287 |
+
- Residual Block
|
| 288 |
+
- Residual Connection
|
| 289 |
+
- Softmax
|
| 290 |
+
- Squeeze-and-Excitation Block
|
| 291 |
+
Tasks:
|
| 292 |
+
- Image Classification
|
| 293 |
+
Training Techniques:
|
| 294 |
+
- Label Smoothing
|
| 295 |
+
- SGD with Momentum
|
| 296 |
+
- Weight Decay
|
| 297 |
+
Training Data:
|
| 298 |
+
- ImageNet
|
| 299 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 300 |
+
ID: legacy_seresnet50
|
| 301 |
+
LR: 0.6
|
| 302 |
+
Epochs: 100
|
| 303 |
+
Layers: 50
|
| 304 |
+
Dropout: 0.2
|
| 305 |
+
Crop Pct: '0.875'
|
| 306 |
+
Momentum: 0.9
|
| 307 |
+
Image Size: '224'
|
| 308 |
+
Interpolation: bilinear
|
| 309 |
+
Batch Size: 1024
|
| 310 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L419
|
| 311 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth
|
| 312 |
+
Results:
|
| 313 |
+
- Task: Image Classification
|
| 314 |
+
Dataset: ImageNet
|
| 315 |
+
Metrics:
|
| 316 |
+
Top 1 Accuracy: 77.64%
|
| 317 |
+
Top 5 Accuracy: 93.74%
|
| 318 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/legacy-senet.md
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# (Legacy) SENet
|
| 2 |
+
|
| 3 |
+
A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
The weights from this model were ported from Gluon.
|
| 6 |
+
|
| 7 |
+
## How do I use this model on an image?
|
| 8 |
+
To load a pretrained model:
|
| 9 |
+
|
| 10 |
+
```python
|
| 11 |
+
import timm
|
| 12 |
+
model = timm.create_model('legacy_senet154', pretrained=True)
|
| 13 |
+
model.eval()
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
To load and preprocess the image:
|
| 17 |
+
```python
|
| 18 |
+
import urllib
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from timm.data import resolve_data_config
|
| 21 |
+
from timm.data.transforms_factory import create_transform
|
| 22 |
+
|
| 23 |
+
config = resolve_data_config({}, model=model)
|
| 24 |
+
transform = create_transform(**config)
|
| 25 |
+
|
| 26 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 27 |
+
urllib.request.urlretrieve(url, filename)
|
| 28 |
+
img = Image.open(filename).convert('RGB')
|
| 29 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
To get the model predictions:
|
| 33 |
+
```python
|
| 34 |
+
import torch
|
| 35 |
+
with torch.no_grad():
|
| 36 |
+
out = model(tensor)
|
| 37 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 38 |
+
print(probabilities.shape)
|
| 39 |
+
# prints: torch.Size([1000])
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
To get the top-5 predictions class names:
|
| 43 |
+
```python
|
| 44 |
+
# Get imagenet class mappings
|
| 45 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 46 |
+
urllib.request.urlretrieve(url, filename)
|
| 47 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 48 |
+
categories = [s.strip() for s in f.readlines()]
|
| 49 |
+
|
| 50 |
+
# Print top categories per image
|
| 51 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 52 |
+
for i in range(top5_prob.size(0)):
|
| 53 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 54 |
+
# prints class names and probabilities like:
|
| 55 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
Replace the model name with the variant you want to use, e.g. `legacy_senet154`. You can find the IDs in the model summaries at the top of this page.
|
| 59 |
+
|
| 60 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 61 |
+
|
| 62 |
+
## How do I finetune this model?
|
| 63 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 64 |
+
```python
|
| 65 |
+
model = timm.create_model('legacy_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 66 |
+
```
|
| 67 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 68 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 69 |
+
|
| 70 |
+
## How do I train this model?
|
| 71 |
+
|
| 72 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 73 |
+
|
| 74 |
+
## Citation
|
| 75 |
+
|
| 76 |
+
```BibTeX
|
| 77 |
+
@misc{hu2019squeezeandexcitation,
|
| 78 |
+
title={Squeeze-and-Excitation Networks},
|
| 79 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 80 |
+
year={2019},
|
| 81 |
+
eprint={1709.01507},
|
| 82 |
+
archivePrefix={arXiv},
|
| 83 |
+
primaryClass={cs.CV}
|
| 84 |
+
}
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
<!--
|
| 88 |
+
Type: model-index
|
| 89 |
+
Collections:
|
| 90 |
+
- Name: Legacy SENet
|
| 91 |
+
Paper:
|
| 92 |
+
Title: Squeeze-and-Excitation Networks
|
| 93 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 94 |
+
Models:
|
| 95 |
+
- Name: legacy_senet154
|
| 96 |
+
In Collection: Legacy SENet
|
| 97 |
+
Metadata:
|
| 98 |
+
FLOPs: 26659556016
|
| 99 |
+
Parameters: 115090000
|
| 100 |
+
File Size: 461488402
|
| 101 |
+
Architecture:
|
| 102 |
+
- Convolution
|
| 103 |
+
- Dense Connections
|
| 104 |
+
- Global Average Pooling
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- Softmax
|
| 107 |
+
- Squeeze-and-Excitation Block
|
| 108 |
+
Tasks:
|
| 109 |
+
- Image Classification
|
| 110 |
+
Training Techniques:
|
| 111 |
+
- Label Smoothing
|
| 112 |
+
- SGD with Momentum
|
| 113 |
+
- Weight Decay
|
| 114 |
+
Training Data:
|
| 115 |
+
- ImageNet
|
| 116 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 117 |
+
ID: legacy_senet154
|
| 118 |
+
LR: 0.6
|
| 119 |
+
Epochs: 100
|
| 120 |
+
Layers: 154
|
| 121 |
+
Dropout: 0.2
|
| 122 |
+
Crop Pct: '0.875'
|
| 123 |
+
Momentum: 0.9
|
| 124 |
+
Batch Size: 1024
|
| 125 |
+
Image Size: '224'
|
| 126 |
+
Interpolation: bilinear
|
| 127 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L440
|
| 128 |
+
Weights: http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth
|
| 129 |
+
Results:
|
| 130 |
+
- Task: Image Classification
|
| 131 |
+
Dataset: ImageNet
|
| 132 |
+
Metrics:
|
| 133 |
+
Top 1 Accuracy: 81.33%
|
| 134 |
+
Top 5 Accuracy: 95.51%
|
| 135 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/mobilenet-v2.md
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MobileNet v2
|
| 2 |
+
|
| 3 |
+
**MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('mobilenetv2_100', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{DBLP:journals/corr/abs-1801-04381,
|
| 76 |
+
author = {Mark Sandler and
|
| 77 |
+
Andrew G. Howard and
|
| 78 |
+
Menglong Zhu and
|
| 79 |
+
Andrey Zhmoginov and
|
| 80 |
+
Liang{-}Chieh Chen},
|
| 81 |
+
title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
|
| 82 |
+
Detection and Segmentation},
|
| 83 |
+
journal = {CoRR},
|
| 84 |
+
volume = {abs/1801.04381},
|
| 85 |
+
year = {2018},
|
| 86 |
+
url = {http://arxiv.org/abs/1801.04381},
|
| 87 |
+
archivePrefix = {arXiv},
|
| 88 |
+
eprint = {1801.04381},
|
| 89 |
+
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
|
| 90 |
+
biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
|
| 91 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 92 |
+
}
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
<!--
|
| 96 |
+
Type: model-index
|
| 97 |
+
Collections:
|
| 98 |
+
- Name: MobileNet V2
|
| 99 |
+
Paper:
|
| 100 |
+
Title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks'
|
| 101 |
+
URL: https://paperswithcode.com/paper/mobilenetv2-inverted-residuals-and-linear
|
| 102 |
+
Models:
|
| 103 |
+
- Name: mobilenetv2_100
|
| 104 |
+
In Collection: MobileNet V2
|
| 105 |
+
Metadata:
|
| 106 |
+
FLOPs: 401920448
|
| 107 |
+
Parameters: 3500000
|
| 108 |
+
File Size: 14202571
|
| 109 |
+
Architecture:
|
| 110 |
+
- 1x1 Convolution
|
| 111 |
+
- Batch Normalization
|
| 112 |
+
- Convolution
|
| 113 |
+
- Depthwise Separable Convolution
|
| 114 |
+
- Dropout
|
| 115 |
+
- Inverted Residual Block
|
| 116 |
+
- Max Pooling
|
| 117 |
+
- ReLU6
|
| 118 |
+
- Residual Connection
|
| 119 |
+
- Softmax
|
| 120 |
+
Tasks:
|
| 121 |
+
- Image Classification
|
| 122 |
+
Training Techniques:
|
| 123 |
+
- RMSProp
|
| 124 |
+
- Weight Decay
|
| 125 |
+
Training Data:
|
| 126 |
+
- ImageNet
|
| 127 |
+
Training Resources: 16x GPUs
|
| 128 |
+
ID: mobilenetv2_100
|
| 129 |
+
LR: 0.045
|
| 130 |
+
Crop Pct: '0.875'
|
| 131 |
+
Momentum: 0.9
|
| 132 |
+
Batch Size: 1536
|
| 133 |
+
Image Size: '224'
|
| 134 |
+
Weight Decay: 4.0e-05
|
| 135 |
+
Interpolation: bicubic
|
| 136 |
+
RMSProp Decay: 0.9
|
| 137 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L955
|
| 138 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth
|
| 139 |
+
Results:
|
| 140 |
+
- Task: Image Classification
|
| 141 |
+
Dataset: ImageNet
|
| 142 |
+
Metrics:
|
| 143 |
+
Top 1 Accuracy: 72.95%
|
| 144 |
+
Top 5 Accuracy: 91.0%
|
| 145 |
+
- Name: mobilenetv2_110d
|
| 146 |
+
In Collection: MobileNet V2
|
| 147 |
+
Metadata:
|
| 148 |
+
FLOPs: 573958832
|
| 149 |
+
Parameters: 4520000
|
| 150 |
+
File Size: 18316431
|
| 151 |
+
Architecture:
|
| 152 |
+
- 1x1 Convolution
|
| 153 |
+
- Batch Normalization
|
| 154 |
+
- Convolution
|
| 155 |
+
- Depthwise Separable Convolution
|
| 156 |
+
- Dropout
|
| 157 |
+
- Inverted Residual Block
|
| 158 |
+
- Max Pooling
|
| 159 |
+
- ReLU6
|
| 160 |
+
- Residual Connection
|
| 161 |
+
- Softmax
|
| 162 |
+
Tasks:
|
| 163 |
+
- Image Classification
|
| 164 |
+
Training Techniques:
|
| 165 |
+
- RMSProp
|
| 166 |
+
- Weight Decay
|
| 167 |
+
Training Data:
|
| 168 |
+
- ImageNet
|
| 169 |
+
Training Resources: 16x GPUs
|
| 170 |
+
ID: mobilenetv2_110d
|
| 171 |
+
LR: 0.045
|
| 172 |
+
Crop Pct: '0.875'
|
| 173 |
+
Momentum: 0.9
|
| 174 |
+
Batch Size: 1536
|
| 175 |
+
Image Size: '224'
|
| 176 |
+
Weight Decay: 4.0e-05
|
| 177 |
+
Interpolation: bicubic
|
| 178 |
+
RMSProp Decay: 0.9
|
| 179 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L969
|
| 180 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth
|
| 181 |
+
Results:
|
| 182 |
+
- Task: Image Classification
|
| 183 |
+
Dataset: ImageNet
|
| 184 |
+
Metrics:
|
| 185 |
+
Top 1 Accuracy: 75.05%
|
| 186 |
+
Top 5 Accuracy: 92.19%
|
| 187 |
+
- Name: mobilenetv2_120d
|
| 188 |
+
In Collection: MobileNet V2
|
| 189 |
+
Metadata:
|
| 190 |
+
FLOPs: 888510048
|
| 191 |
+
Parameters: 5830000
|
| 192 |
+
File Size: 23651121
|
| 193 |
+
Architecture:
|
| 194 |
+
- 1x1 Convolution
|
| 195 |
+
- Batch Normalization
|
| 196 |
+
- Convolution
|
| 197 |
+
- Depthwise Separable Convolution
|
| 198 |
+
- Dropout
|
| 199 |
+
- Inverted Residual Block
|
| 200 |
+
- Max Pooling
|
| 201 |
+
- ReLU6
|
| 202 |
+
- Residual Connection
|
| 203 |
+
- Softmax
|
| 204 |
+
Tasks:
|
| 205 |
+
- Image Classification
|
| 206 |
+
Training Techniques:
|
| 207 |
+
- RMSProp
|
| 208 |
+
- Weight Decay
|
| 209 |
+
Training Data:
|
| 210 |
+
- ImageNet
|
| 211 |
+
Training Resources: 16x GPUs
|
| 212 |
+
ID: mobilenetv2_120d
|
| 213 |
+
LR: 0.045
|
| 214 |
+
Crop Pct: '0.875'
|
| 215 |
+
Momentum: 0.9
|
| 216 |
+
Batch Size: 1536
|
| 217 |
+
Image Size: '224'
|
| 218 |
+
Weight Decay: 4.0e-05
|
| 219 |
+
Interpolation: bicubic
|
| 220 |
+
RMSProp Decay: 0.9
|
| 221 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L977
|
| 222 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth
|
| 223 |
+
Results:
|
| 224 |
+
- Task: Image Classification
|
| 225 |
+
Dataset: ImageNet
|
| 226 |
+
Metrics:
|
| 227 |
+
Top 1 Accuracy: 77.28%
|
| 228 |
+
Top 5 Accuracy: 93.51%
|
| 229 |
+
- Name: mobilenetv2_140
|
| 230 |
+
In Collection: MobileNet V2
|
| 231 |
+
Metadata:
|
| 232 |
+
FLOPs: 770196784
|
| 233 |
+
Parameters: 6110000
|
| 234 |
+
File Size: 24673555
|
| 235 |
+
Architecture:
|
| 236 |
+
- 1x1 Convolution
|
| 237 |
+
- Batch Normalization
|
| 238 |
+
- Convolution
|
| 239 |
+
- Depthwise Separable Convolution
|
| 240 |
+
- Dropout
|
| 241 |
+
- Inverted Residual Block
|
| 242 |
+
- Max Pooling
|
| 243 |
+
- ReLU6
|
| 244 |
+
- Residual Connection
|
| 245 |
+
- Softmax
|
| 246 |
+
Tasks:
|
| 247 |
+
- Image Classification
|
| 248 |
+
Training Techniques:
|
| 249 |
+
- RMSProp
|
| 250 |
+
- Weight Decay
|
| 251 |
+
Training Data:
|
| 252 |
+
- ImageNet
|
| 253 |
+
Training Resources: 16x GPUs
|
| 254 |
+
ID: mobilenetv2_140
|
| 255 |
+
LR: 0.045
|
| 256 |
+
Crop Pct: '0.875'
|
| 257 |
+
Momentum: 0.9
|
| 258 |
+
Batch Size: 1536
|
| 259 |
+
Image Size: '224'
|
| 260 |
+
Weight Decay: 4.0e-05
|
| 261 |
+
Interpolation: bicubic
|
| 262 |
+
RMSProp Decay: 0.9
|
| 263 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L962
|
| 264 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth
|
| 265 |
+
Results:
|
| 266 |
+
- Task: Image Classification
|
| 267 |
+
Dataset: ImageNet
|
| 268 |
+
Metrics:
|
| 269 |
+
Top 1 Accuracy: 76.51%
|
| 270 |
+
Top 5 Accuracy: 93.0%
|
| 271 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/nasnet.md
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NASNet
|
| 2 |
+
|
| 3 |
+
**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('nasnetalarge', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{zoph2018learning,
|
| 76 |
+
title={Learning Transferable Architectures for Scalable Image Recognition},
|
| 77 |
+
author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le},
|
| 78 |
+
year={2018},
|
| 79 |
+
eprint={1707.07012},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: NASNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Learning Transferable Architectures for Scalable Image Recognition
|
| 91 |
+
URL: https://paperswithcode.com/paper/learning-transferable-architectures-for
|
| 92 |
+
Models:
|
| 93 |
+
- Name: nasnetalarge
|
| 94 |
+
In Collection: NASNet
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 30242402862
|
| 97 |
+
Parameters: 88750000
|
| 98 |
+
File Size: 356056626
|
| 99 |
+
Architecture:
|
| 100 |
+
- Average Pooling
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Convolution
|
| 103 |
+
- Depthwise Separable Convolution
|
| 104 |
+
- Dropout
|
| 105 |
+
- ReLU
|
| 106 |
+
Tasks:
|
| 107 |
+
- Image Classification
|
| 108 |
+
Training Techniques:
|
| 109 |
+
- Label Smoothing
|
| 110 |
+
- RMSProp
|
| 111 |
+
- Weight Decay
|
| 112 |
+
Training Data:
|
| 113 |
+
- ImageNet
|
| 114 |
+
Training Resources: 50x Tesla K40 GPUs
|
| 115 |
+
ID: nasnetalarge
|
| 116 |
+
Dropout: 0.5
|
| 117 |
+
Crop Pct: '0.911'
|
| 118 |
+
Momentum: 0.9
|
| 119 |
+
Image Size: '331'
|
| 120 |
+
Interpolation: bicubic
|
| 121 |
+
Label Smoothing: 0.1
|
| 122 |
+
RMSProp $\epsilon$: 1.0
|
| 123 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562
|
| 124 |
+
Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth
|
| 125 |
+
Results:
|
| 126 |
+
- Task: Image Classification
|
| 127 |
+
Dataset: ImageNet
|
| 128 |
+
Metrics:
|
| 129 |
+
Top 1 Accuracy: 82.63%
|
| 130 |
+
Top 5 Accuracy: 96.05%
|
| 131 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/noisy-student.md
ADDED
|
@@ -0,0 +1,571 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Noisy Student (EfficientNet)
|
| 2 |
+
|
| 3 |
+
**Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training
|
| 4 |
+
and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps:
|
| 5 |
+
|
| 6 |
+
1. train a teacher model on labeled images
|
| 7 |
+
2. use the teacher to generate pseudo labels on unlabeled images
|
| 8 |
+
3. train a student model on the combination of labeled images and pseudo labeled images.
|
| 9 |
+
|
| 10 |
+
The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student.
|
| 11 |
+
|
| 12 |
+
Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training.
|
| 13 |
+
|
| 14 |
+
## How do I use this model on an image?
|
| 15 |
+
To load a pretrained model:
|
| 16 |
+
|
| 17 |
+
```python
|
| 18 |
+
import timm
|
| 19 |
+
model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True)
|
| 20 |
+
model.eval()
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
To load and preprocess the image:
|
| 24 |
+
```python
|
| 25 |
+
import urllib
|
| 26 |
+
from PIL import Image
|
| 27 |
+
from timm.data import resolve_data_config
|
| 28 |
+
from timm.data.transforms_factory import create_transform
|
| 29 |
+
|
| 30 |
+
config = resolve_data_config({}, model=model)
|
| 31 |
+
transform = create_transform(**config)
|
| 32 |
+
|
| 33 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 34 |
+
urllib.request.urlretrieve(url, filename)
|
| 35 |
+
img = Image.open(filename).convert('RGB')
|
| 36 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
To get the model predictions:
|
| 40 |
+
```python
|
| 41 |
+
import torch
|
| 42 |
+
with torch.no_grad():
|
| 43 |
+
out = model(tensor)
|
| 44 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 45 |
+
print(probabilities.shape)
|
| 46 |
+
# prints: torch.Size([1000])
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
To get the top-5 predictions class names:
|
| 50 |
+
```python
|
| 51 |
+
# Get imagenet class mappings
|
| 52 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 53 |
+
urllib.request.urlretrieve(url, filename)
|
| 54 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 55 |
+
categories = [s.strip() for s in f.readlines()]
|
| 56 |
+
|
| 57 |
+
# Print top categories per image
|
| 58 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 59 |
+
for i in range(top5_prob.size(0)):
|
| 60 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 61 |
+
# prints class names and probabilities like:
|
| 62 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page.
|
| 66 |
+
|
| 67 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 68 |
+
|
| 69 |
+
## How do I finetune this model?
|
| 70 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 71 |
+
```python
|
| 72 |
+
model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 73 |
+
```
|
| 74 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 75 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 76 |
+
|
| 77 |
+
## How do I train this model?
|
| 78 |
+
|
| 79 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 80 |
+
|
| 81 |
+
## Citation
|
| 82 |
+
|
| 83 |
+
```BibTeX
|
| 84 |
+
@misc{xie2020selftraining,
|
| 85 |
+
title={Self-training with Noisy Student improves ImageNet classification},
|
| 86 |
+
author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le},
|
| 87 |
+
year={2020},
|
| 88 |
+
eprint={1911.04252},
|
| 89 |
+
archivePrefix={arXiv},
|
| 90 |
+
primaryClass={cs.LG}
|
| 91 |
+
}
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
<!--
|
| 95 |
+
Type: model-index
|
| 96 |
+
Collections:
|
| 97 |
+
- Name: Noisy Student
|
| 98 |
+
Paper:
|
| 99 |
+
Title: Self-training with Noisy Student improves ImageNet classification
|
| 100 |
+
URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves
|
| 101 |
+
Models:
|
| 102 |
+
- Name: tf_efficientnet_b0_ns
|
| 103 |
+
In Collection: Noisy Student
|
| 104 |
+
Metadata:
|
| 105 |
+
FLOPs: 488688572
|
| 106 |
+
Parameters: 5290000
|
| 107 |
+
File Size: 21386709
|
| 108 |
+
Architecture:
|
| 109 |
+
- 1x1 Convolution
|
| 110 |
+
- Average Pooling
|
| 111 |
+
- Batch Normalization
|
| 112 |
+
- Convolution
|
| 113 |
+
- Dense Connections
|
| 114 |
+
- Dropout
|
| 115 |
+
- Inverted Residual Block
|
| 116 |
+
- Squeeze-and-Excitation Block
|
| 117 |
+
- Swish
|
| 118 |
+
Tasks:
|
| 119 |
+
- Image Classification
|
| 120 |
+
Training Techniques:
|
| 121 |
+
- AutoAugment
|
| 122 |
+
- FixRes
|
| 123 |
+
- Label Smoothing
|
| 124 |
+
- Noisy Student
|
| 125 |
+
- RMSProp
|
| 126 |
+
- RandAugment
|
| 127 |
+
- Weight Decay
|
| 128 |
+
Training Data:
|
| 129 |
+
- ImageNet
|
| 130 |
+
- JFT-300M
|
| 131 |
+
Training Resources: Cloud TPU v3 Pod
|
| 132 |
+
ID: tf_efficientnet_b0_ns
|
| 133 |
+
LR: 0.128
|
| 134 |
+
Epochs: 700
|
| 135 |
+
Dropout: 0.5
|
| 136 |
+
Crop Pct: '0.875'
|
| 137 |
+
Momentum: 0.9
|
| 138 |
+
Batch Size: 2048
|
| 139 |
+
Image Size: '224'
|
| 140 |
+
Weight Decay: 1.0e-05
|
| 141 |
+
Interpolation: bicubic
|
| 142 |
+
RMSProp Decay: 0.9
|
| 143 |
+
Label Smoothing: 0.1
|
| 144 |
+
BatchNorm Momentum: 0.99
|
| 145 |
+
Stochastic Depth Survival: 0.8
|
| 146 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427
|
| 147 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth
|
| 148 |
+
Results:
|
| 149 |
+
- Task: Image Classification
|
| 150 |
+
Dataset: ImageNet
|
| 151 |
+
Metrics:
|
| 152 |
+
Top 1 Accuracy: 78.66%
|
| 153 |
+
Top 5 Accuracy: 94.37%
|
| 154 |
+
- Name: tf_efficientnet_b1_ns
|
| 155 |
+
In Collection: Noisy Student
|
| 156 |
+
Metadata:
|
| 157 |
+
FLOPs: 883633200
|
| 158 |
+
Parameters: 7790000
|
| 159 |
+
File Size: 31516408
|
| 160 |
+
Architecture:
|
| 161 |
+
- 1x1 Convolution
|
| 162 |
+
- Average Pooling
|
| 163 |
+
- Batch Normalization
|
| 164 |
+
- Convolution
|
| 165 |
+
- Dense Connections
|
| 166 |
+
- Dropout
|
| 167 |
+
- Inverted Residual Block
|
| 168 |
+
- Squeeze-and-Excitation Block
|
| 169 |
+
- Swish
|
| 170 |
+
Tasks:
|
| 171 |
+
- Image Classification
|
| 172 |
+
Training Techniques:
|
| 173 |
+
- AutoAugment
|
| 174 |
+
- FixRes
|
| 175 |
+
- Label Smoothing
|
| 176 |
+
- Noisy Student
|
| 177 |
+
- RMSProp
|
| 178 |
+
- RandAugment
|
| 179 |
+
- Weight Decay
|
| 180 |
+
Training Data:
|
| 181 |
+
- ImageNet
|
| 182 |
+
- JFT-300M
|
| 183 |
+
Training Resources: Cloud TPU v3 Pod
|
| 184 |
+
ID: tf_efficientnet_b1_ns
|
| 185 |
+
LR: 0.128
|
| 186 |
+
Epochs: 700
|
| 187 |
+
Dropout: 0.5
|
| 188 |
+
Crop Pct: '0.882'
|
| 189 |
+
Momentum: 0.9
|
| 190 |
+
Batch Size: 2048
|
| 191 |
+
Image Size: '240'
|
| 192 |
+
Weight Decay: 1.0e-05
|
| 193 |
+
Interpolation: bicubic
|
| 194 |
+
RMSProp Decay: 0.9
|
| 195 |
+
Label Smoothing: 0.1
|
| 196 |
+
BatchNorm Momentum: 0.99
|
| 197 |
+
Stochastic Depth Survival: 0.8
|
| 198 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437
|
| 199 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth
|
| 200 |
+
Results:
|
| 201 |
+
- Task: Image Classification
|
| 202 |
+
Dataset: ImageNet
|
| 203 |
+
Metrics:
|
| 204 |
+
Top 1 Accuracy: 81.39%
|
| 205 |
+
Top 5 Accuracy: 95.74%
|
| 206 |
+
- Name: tf_efficientnet_b2_ns
|
| 207 |
+
In Collection: Noisy Student
|
| 208 |
+
Metadata:
|
| 209 |
+
FLOPs: 1234321170
|
| 210 |
+
Parameters: 9110000
|
| 211 |
+
File Size: 36801803
|
| 212 |
+
Architecture:
|
| 213 |
+
- 1x1 Convolution
|
| 214 |
+
- Average Pooling
|
| 215 |
+
- Batch Normalization
|
| 216 |
+
- Convolution
|
| 217 |
+
- Dense Connections
|
| 218 |
+
- Dropout
|
| 219 |
+
- Inverted Residual Block
|
| 220 |
+
- Squeeze-and-Excitation Block
|
| 221 |
+
- Swish
|
| 222 |
+
Tasks:
|
| 223 |
+
- Image Classification
|
| 224 |
+
Training Techniques:
|
| 225 |
+
- AutoAugment
|
| 226 |
+
- FixRes
|
| 227 |
+
- Label Smoothing
|
| 228 |
+
- Noisy Student
|
| 229 |
+
- RMSProp
|
| 230 |
+
- RandAugment
|
| 231 |
+
- Weight Decay
|
| 232 |
+
Training Data:
|
| 233 |
+
- ImageNet
|
| 234 |
+
- JFT-300M
|
| 235 |
+
Training Resources: Cloud TPU v3 Pod
|
| 236 |
+
ID: tf_efficientnet_b2_ns
|
| 237 |
+
LR: 0.128
|
| 238 |
+
Epochs: 700
|
| 239 |
+
Dropout: 0.5
|
| 240 |
+
Crop Pct: '0.89'
|
| 241 |
+
Momentum: 0.9
|
| 242 |
+
Batch Size: 2048
|
| 243 |
+
Image Size: '260'
|
| 244 |
+
Weight Decay: 1.0e-05
|
| 245 |
+
Interpolation: bicubic
|
| 246 |
+
RMSProp Decay: 0.9
|
| 247 |
+
Label Smoothing: 0.1
|
| 248 |
+
BatchNorm Momentum: 0.99
|
| 249 |
+
Stochastic Depth Survival: 0.8
|
| 250 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447
|
| 251 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth
|
| 252 |
+
Results:
|
| 253 |
+
- Task: Image Classification
|
| 254 |
+
Dataset: ImageNet
|
| 255 |
+
Metrics:
|
| 256 |
+
Top 1 Accuracy: 82.39%
|
| 257 |
+
Top 5 Accuracy: 96.24%
|
| 258 |
+
- Name: tf_efficientnet_b3_ns
|
| 259 |
+
In Collection: Noisy Student
|
| 260 |
+
Metadata:
|
| 261 |
+
FLOPs: 2275247568
|
| 262 |
+
Parameters: 12230000
|
| 263 |
+
File Size: 49385734
|
| 264 |
+
Architecture:
|
| 265 |
+
- 1x1 Convolution
|
| 266 |
+
- Average Pooling
|
| 267 |
+
- Batch Normalization
|
| 268 |
+
- Convolution
|
| 269 |
+
- Dense Connections
|
| 270 |
+
- Dropout
|
| 271 |
+
- Inverted Residual Block
|
| 272 |
+
- Squeeze-and-Excitation Block
|
| 273 |
+
- Swish
|
| 274 |
+
Tasks:
|
| 275 |
+
- Image Classification
|
| 276 |
+
Training Techniques:
|
| 277 |
+
- AutoAugment
|
| 278 |
+
- FixRes
|
| 279 |
+
- Label Smoothing
|
| 280 |
+
- Noisy Student
|
| 281 |
+
- RMSProp
|
| 282 |
+
- RandAugment
|
| 283 |
+
- Weight Decay
|
| 284 |
+
Training Data:
|
| 285 |
+
- ImageNet
|
| 286 |
+
- JFT-300M
|
| 287 |
+
Training Resources: Cloud TPU v3 Pod
|
| 288 |
+
ID: tf_efficientnet_b3_ns
|
| 289 |
+
LR: 0.128
|
| 290 |
+
Epochs: 700
|
| 291 |
+
Dropout: 0.5
|
| 292 |
+
Crop Pct: '0.904'
|
| 293 |
+
Momentum: 0.9
|
| 294 |
+
Batch Size: 2048
|
| 295 |
+
Image Size: '300'
|
| 296 |
+
Weight Decay: 1.0e-05
|
| 297 |
+
Interpolation: bicubic
|
| 298 |
+
RMSProp Decay: 0.9
|
| 299 |
+
Label Smoothing: 0.1
|
| 300 |
+
BatchNorm Momentum: 0.99
|
| 301 |
+
Stochastic Depth Survival: 0.8
|
| 302 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457
|
| 303 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth
|
| 304 |
+
Results:
|
| 305 |
+
- Task: Image Classification
|
| 306 |
+
Dataset: ImageNet
|
| 307 |
+
Metrics:
|
| 308 |
+
Top 1 Accuracy: 84.04%
|
| 309 |
+
Top 5 Accuracy: 96.91%
|
| 310 |
+
- Name: tf_efficientnet_b4_ns
|
| 311 |
+
In Collection: Noisy Student
|
| 312 |
+
Metadata:
|
| 313 |
+
FLOPs: 5749638672
|
| 314 |
+
Parameters: 19340000
|
| 315 |
+
File Size: 77995057
|
| 316 |
+
Architecture:
|
| 317 |
+
- 1x1 Convolution
|
| 318 |
+
- Average Pooling
|
| 319 |
+
- Batch Normalization
|
| 320 |
+
- Convolution
|
| 321 |
+
- Dense Connections
|
| 322 |
+
- Dropout
|
| 323 |
+
- Inverted Residual Block
|
| 324 |
+
- Squeeze-and-Excitation Block
|
| 325 |
+
- Swish
|
| 326 |
+
Tasks:
|
| 327 |
+
- Image Classification
|
| 328 |
+
Training Techniques:
|
| 329 |
+
- AutoAugment
|
| 330 |
+
- FixRes
|
| 331 |
+
- Label Smoothing
|
| 332 |
+
- Noisy Student
|
| 333 |
+
- RMSProp
|
| 334 |
+
- RandAugment
|
| 335 |
+
- Weight Decay
|
| 336 |
+
Training Data:
|
| 337 |
+
- ImageNet
|
| 338 |
+
- JFT-300M
|
| 339 |
+
Training Resources: Cloud TPU v3 Pod
|
| 340 |
+
ID: tf_efficientnet_b4_ns
|
| 341 |
+
LR: 0.128
|
| 342 |
+
Epochs: 700
|
| 343 |
+
Dropout: 0.5
|
| 344 |
+
Crop Pct: '0.922'
|
| 345 |
+
Momentum: 0.9
|
| 346 |
+
Batch Size: 2048
|
| 347 |
+
Image Size: '380'
|
| 348 |
+
Weight Decay: 1.0e-05
|
| 349 |
+
Interpolation: bicubic
|
| 350 |
+
RMSProp Decay: 0.9
|
| 351 |
+
Label Smoothing: 0.1
|
| 352 |
+
BatchNorm Momentum: 0.99
|
| 353 |
+
Stochastic Depth Survival: 0.8
|
| 354 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467
|
| 355 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth
|
| 356 |
+
Results:
|
| 357 |
+
- Task: Image Classification
|
| 358 |
+
Dataset: ImageNet
|
| 359 |
+
Metrics:
|
| 360 |
+
Top 1 Accuracy: 85.15%
|
| 361 |
+
Top 5 Accuracy: 97.47%
|
| 362 |
+
- Name: tf_efficientnet_b5_ns
|
| 363 |
+
In Collection: Noisy Student
|
| 364 |
+
Metadata:
|
| 365 |
+
FLOPs: 13176501888
|
| 366 |
+
Parameters: 30390000
|
| 367 |
+
File Size: 122404944
|
| 368 |
+
Architecture:
|
| 369 |
+
- 1x1 Convolution
|
| 370 |
+
- Average Pooling
|
| 371 |
+
- Batch Normalization
|
| 372 |
+
- Convolution
|
| 373 |
+
- Dense Connections
|
| 374 |
+
- Dropout
|
| 375 |
+
- Inverted Residual Block
|
| 376 |
+
- Squeeze-and-Excitation Block
|
| 377 |
+
- Swish
|
| 378 |
+
Tasks:
|
| 379 |
+
- Image Classification
|
| 380 |
+
Training Techniques:
|
| 381 |
+
- AutoAugment
|
| 382 |
+
- FixRes
|
| 383 |
+
- Label Smoothing
|
| 384 |
+
- Noisy Student
|
| 385 |
+
- RMSProp
|
| 386 |
+
- RandAugment
|
| 387 |
+
- Weight Decay
|
| 388 |
+
Training Data:
|
| 389 |
+
- ImageNet
|
| 390 |
+
- JFT-300M
|
| 391 |
+
Training Resources: Cloud TPU v3 Pod
|
| 392 |
+
ID: tf_efficientnet_b5_ns
|
| 393 |
+
LR: 0.128
|
| 394 |
+
Epochs: 350
|
| 395 |
+
Dropout: 0.5
|
| 396 |
+
Crop Pct: '0.934'
|
| 397 |
+
Momentum: 0.9
|
| 398 |
+
Batch Size: 2048
|
| 399 |
+
Image Size: '456'
|
| 400 |
+
Weight Decay: 1.0e-05
|
| 401 |
+
Interpolation: bicubic
|
| 402 |
+
RMSProp Decay: 0.9
|
| 403 |
+
Label Smoothing: 0.1
|
| 404 |
+
BatchNorm Momentum: 0.99
|
| 405 |
+
Stochastic Depth Survival: 0.8
|
| 406 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477
|
| 407 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth
|
| 408 |
+
Results:
|
| 409 |
+
- Task: Image Classification
|
| 410 |
+
Dataset: ImageNet
|
| 411 |
+
Metrics:
|
| 412 |
+
Top 1 Accuracy: 86.08%
|
| 413 |
+
Top 5 Accuracy: 97.75%
|
| 414 |
+
- Name: tf_efficientnet_b6_ns
|
| 415 |
+
In Collection: Noisy Student
|
| 416 |
+
Metadata:
|
| 417 |
+
FLOPs: 24180518488
|
| 418 |
+
Parameters: 43040000
|
| 419 |
+
File Size: 173239537
|
| 420 |
+
Architecture:
|
| 421 |
+
- 1x1 Convolution
|
| 422 |
+
- Average Pooling
|
| 423 |
+
- Batch Normalization
|
| 424 |
+
- Convolution
|
| 425 |
+
- Dense Connections
|
| 426 |
+
- Dropout
|
| 427 |
+
- Inverted Residual Block
|
| 428 |
+
- Squeeze-and-Excitation Block
|
| 429 |
+
- Swish
|
| 430 |
+
Tasks:
|
| 431 |
+
- Image Classification
|
| 432 |
+
Training Techniques:
|
| 433 |
+
- AutoAugment
|
| 434 |
+
- FixRes
|
| 435 |
+
- Label Smoothing
|
| 436 |
+
- Noisy Student
|
| 437 |
+
- RMSProp
|
| 438 |
+
- RandAugment
|
| 439 |
+
- Weight Decay
|
| 440 |
+
Training Data:
|
| 441 |
+
- ImageNet
|
| 442 |
+
- JFT-300M
|
| 443 |
+
Training Resources: Cloud TPU v3 Pod
|
| 444 |
+
ID: tf_efficientnet_b6_ns
|
| 445 |
+
LR: 0.128
|
| 446 |
+
Epochs: 350
|
| 447 |
+
Dropout: 0.5
|
| 448 |
+
Crop Pct: '0.942'
|
| 449 |
+
Momentum: 0.9
|
| 450 |
+
Batch Size: 2048
|
| 451 |
+
Image Size: '528'
|
| 452 |
+
Weight Decay: 1.0e-05
|
| 453 |
+
Interpolation: bicubic
|
| 454 |
+
RMSProp Decay: 0.9
|
| 455 |
+
Label Smoothing: 0.1
|
| 456 |
+
BatchNorm Momentum: 0.99
|
| 457 |
+
Stochastic Depth Survival: 0.8
|
| 458 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487
|
| 459 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth
|
| 460 |
+
Results:
|
| 461 |
+
- Task: Image Classification
|
| 462 |
+
Dataset: ImageNet
|
| 463 |
+
Metrics:
|
| 464 |
+
Top 1 Accuracy: 86.45%
|
| 465 |
+
Top 5 Accuracy: 97.88%
|
| 466 |
+
- Name: tf_efficientnet_b7_ns
|
| 467 |
+
In Collection: Noisy Student
|
| 468 |
+
Metadata:
|
| 469 |
+
FLOPs: 48205304880
|
| 470 |
+
Parameters: 66349999
|
| 471 |
+
File Size: 266853140
|
| 472 |
+
Architecture:
|
| 473 |
+
- 1x1 Convolution
|
| 474 |
+
- Average Pooling
|
| 475 |
+
- Batch Normalization
|
| 476 |
+
- Convolution
|
| 477 |
+
- Dense Connections
|
| 478 |
+
- Dropout
|
| 479 |
+
- Inverted Residual Block
|
| 480 |
+
- Squeeze-and-Excitation Block
|
| 481 |
+
- Swish
|
| 482 |
+
Tasks:
|
| 483 |
+
- Image Classification
|
| 484 |
+
Training Techniques:
|
| 485 |
+
- AutoAugment
|
| 486 |
+
- FixRes
|
| 487 |
+
- Label Smoothing
|
| 488 |
+
- Noisy Student
|
| 489 |
+
- RMSProp
|
| 490 |
+
- RandAugment
|
| 491 |
+
- Weight Decay
|
| 492 |
+
Training Data:
|
| 493 |
+
- ImageNet
|
| 494 |
+
- JFT-300M
|
| 495 |
+
Training Resources: Cloud TPU v3 Pod
|
| 496 |
+
ID: tf_efficientnet_b7_ns
|
| 497 |
+
LR: 0.128
|
| 498 |
+
Epochs: 350
|
| 499 |
+
Dropout: 0.5
|
| 500 |
+
Crop Pct: '0.949'
|
| 501 |
+
Momentum: 0.9
|
| 502 |
+
Batch Size: 2048
|
| 503 |
+
Image Size: '600'
|
| 504 |
+
Weight Decay: 1.0e-05
|
| 505 |
+
Interpolation: bicubic
|
| 506 |
+
RMSProp Decay: 0.9
|
| 507 |
+
Label Smoothing: 0.1
|
| 508 |
+
BatchNorm Momentum: 0.99
|
| 509 |
+
Stochastic Depth Survival: 0.8
|
| 510 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498
|
| 511 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth
|
| 512 |
+
Results:
|
| 513 |
+
- Task: Image Classification
|
| 514 |
+
Dataset: ImageNet
|
| 515 |
+
Metrics:
|
| 516 |
+
Top 1 Accuracy: 86.83%
|
| 517 |
+
Top 5 Accuracy: 98.08%
|
| 518 |
+
- Name: tf_efficientnet_l2_ns
|
| 519 |
+
In Collection: Noisy Student
|
| 520 |
+
Metadata:
|
| 521 |
+
FLOPs: 611646113804
|
| 522 |
+
Parameters: 480310000
|
| 523 |
+
File Size: 1925950424
|
| 524 |
+
Architecture:
|
| 525 |
+
- 1x1 Convolution
|
| 526 |
+
- Average Pooling
|
| 527 |
+
- Batch Normalization
|
| 528 |
+
- Convolution
|
| 529 |
+
- Dense Connections
|
| 530 |
+
- Dropout
|
| 531 |
+
- Inverted Residual Block
|
| 532 |
+
- Squeeze-and-Excitation Block
|
| 533 |
+
- Swish
|
| 534 |
+
Tasks:
|
| 535 |
+
- Image Classification
|
| 536 |
+
Training Techniques:
|
| 537 |
+
- AutoAugment
|
| 538 |
+
- FixRes
|
| 539 |
+
- Label Smoothing
|
| 540 |
+
- Noisy Student
|
| 541 |
+
- RMSProp
|
| 542 |
+
- RandAugment
|
| 543 |
+
- Weight Decay
|
| 544 |
+
Training Data:
|
| 545 |
+
- ImageNet
|
| 546 |
+
- JFT-300M
|
| 547 |
+
Training Resources: Cloud TPU v3 Pod
|
| 548 |
+
Training Time: 6 days
|
| 549 |
+
ID: tf_efficientnet_l2_ns
|
| 550 |
+
LR: 0.128
|
| 551 |
+
Epochs: 350
|
| 552 |
+
Dropout: 0.5
|
| 553 |
+
Crop Pct: '0.96'
|
| 554 |
+
Momentum: 0.9
|
| 555 |
+
Batch Size: 2048
|
| 556 |
+
Image Size: '800'
|
| 557 |
+
Weight Decay: 1.0e-05
|
| 558 |
+
Interpolation: bicubic
|
| 559 |
+
RMSProp Decay: 0.9
|
| 560 |
+
Label Smoothing: 0.1
|
| 561 |
+
BatchNorm Momentum: 0.99
|
| 562 |
+
Stochastic Depth Survival: 0.8
|
| 563 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520
|
| 564 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth
|
| 565 |
+
Results:
|
| 566 |
+
- Task: Image Classification
|
| 567 |
+
Dataset: ImageNet
|
| 568 |
+
Metrics:
|
| 569 |
+
Top 1 Accuracy: 88.35%
|
| 570 |
+
Top 5 Accuracy: 98.66%
|
| 571 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/pnasnet.md
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PNASNet
|
| 2 |
+
|
| 3 |
+
**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('pnasnet5large', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# Samoyed 0.6425196528434753
# Pomeranian 0.04062102362513542
# keeshond 0.03186424449086189
# white wolf 0.01739676296710968
# Eskimo dog 0.011717947199940681
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{liu2018progressive,
|
| 76 |
+
title={Progressive Neural Architecture Search},
|
| 77 |
+
author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy},
|
| 78 |
+
year={2018},
|
| 79 |
+
eprint={1712.00559},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: PNASNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Progressive Neural Architecture Search
|
| 91 |
+
URL: https://paperswithcode.com/paper/progressive-neural-architecture-search
|
| 92 |
+
Models:
|
| 93 |
+
- Name: pnasnet5large
|
| 94 |
+
In Collection: PNASNet
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 31458865950
|
| 97 |
+
Parameters: 86060000
|
| 98 |
+
File Size: 345153926
|
| 99 |
+
Architecture:
|
| 100 |
+
- Average Pooling
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Convolution
|
| 103 |
+
- Depthwise Separable Convolution
|
| 104 |
+
- Dropout
|
| 105 |
+
- ReLU
|
| 106 |
+
Tasks:
|
| 107 |
+
- Image Classification
|
| 108 |
+
Training Techniques:
|
| 109 |
+
- Label Smoothing
|
| 110 |
+
- RMSProp
|
| 111 |
+
- Weight Decay
|
| 112 |
+
Training Data:
|
| 113 |
+
- ImageNet
|
| 114 |
+
Training Resources: 100x NVIDIA P100 GPUs
|
| 115 |
+
ID: pnasnet5large
|
| 116 |
+
LR: 0.015
|
| 117 |
+
Dropout: 0.5
|
| 118 |
+
Crop Pct: '0.911'
|
| 119 |
+
Momentum: 0.9
|
| 120 |
+
Batch Size: 1600
|
| 121 |
+
Image Size: '331'
|
| 122 |
+
Interpolation: bicubic
|
| 123 |
+
Label Smoothing: 0.1
|
| 124 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343
|
| 125 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth
|
| 126 |
+
Results:
|
| 127 |
+
- Task: Image Classification
|
| 128 |
+
Dataset: ImageNet
|
| 129 |
+
Metrics:
|
| 130 |
+
Top 1 Accuracy: 82.79%
|
| 131 |
+
Top 5 Accuracy: 96.04%
|
| 132 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/res2net.md
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Res2Net
|
| 2 |
+
|
| 3 |
+
**Res2Net** is an image model that employs a variation on bottleneck residual blocks, [Res2Net Blocks](https://paperswithcode.com/method/res2net-block). The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('res2net101_26w_4s', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# Samoyed 0.6425196528434753
# Pomeranian 0.04062102362513542
# keeshond 0.03186424449086189
# white wolf 0.01739676296710968
# Eskimo dog 0.011717947199940681
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `res2net101_26w_4s`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('res2net101_26w_4s', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{Gao_2021,
|
| 76 |
+
title={Res2Net: A New Multi-Scale Backbone Architecture},
|
| 77 |
+
volume={43},
|
| 78 |
+
ISSN={1939-3539},
|
| 79 |
+
url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
|
| 80 |
+
DOI={10.1109/tpami.2019.2938758},
|
| 81 |
+
number={2},
|
| 82 |
+
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
|
| 83 |
+
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
|
| 84 |
+
author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
|
| 85 |
+
year={2021},
|
| 86 |
+
month={Feb},
|
| 87 |
+
pages={652--662}
|
| 88 |
+
}
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
<!--
|
| 92 |
+
Type: model-index
|
| 93 |
+
Collections:
|
| 94 |
+
- Name: Res2Net
|
| 95 |
+
Paper:
|
| 96 |
+
Title: 'Res2Net: A New Multi-scale Backbone Architecture'
|
| 97 |
+
URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
|
| 98 |
+
Models:
|
| 99 |
+
- Name: res2net101_26w_4s
|
| 100 |
+
In Collection: Res2Net
|
| 101 |
+
Metadata:
|
| 102 |
+
FLOPs: 10415881200
|
| 103 |
+
Parameters: 45210000
|
| 104 |
+
File Size: 181456059
|
| 105 |
+
Architecture:
|
| 106 |
+
- Batch Normalization
|
| 107 |
+
- Convolution
|
| 108 |
+
- Global Average Pooling
|
| 109 |
+
- ReLU
|
| 110 |
+
- Res2Net Block
|
| 111 |
+
Tasks:
|
| 112 |
+
- Image Classification
|
| 113 |
+
Training Techniques:
|
| 114 |
+
- SGD with Momentum
|
| 115 |
+
- Weight Decay
|
| 116 |
+
Training Data:
|
| 117 |
+
- ImageNet
|
| 118 |
+
Training Resources: 4x Titan Xp GPUs
|
| 119 |
+
ID: res2net101_26w_4s
|
| 120 |
+
LR: 0.1
|
| 121 |
+
Epochs: 100
|
| 122 |
+
Crop Pct: '0.875'
|
| 123 |
+
Momentum: 0.9
|
| 124 |
+
Batch Size: 256
|
| 125 |
+
Image Size: '224'
|
| 126 |
+
Weight Decay: 0.0001
|
| 127 |
+
Interpolation: bilinear
|
| 128 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L152
|
| 129 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth
|
| 130 |
+
Results:
|
| 131 |
+
- Task: Image Classification
|
| 132 |
+
Dataset: ImageNet
|
| 133 |
+
Metrics:
|
| 134 |
+
Top 1 Accuracy: 79.19%
|
| 135 |
+
Top 5 Accuracy: 94.43%
|
| 136 |
+
- Name: res2net50_14w_8s
|
| 137 |
+
In Collection: Res2Net
|
| 138 |
+
Metadata:
|
| 139 |
+
FLOPs: 5403546768
|
| 140 |
+
Parameters: 25060000
|
| 141 |
+
File Size: 100638543
|
| 142 |
+
Architecture:
|
| 143 |
+
- Batch Normalization
|
| 144 |
+
- Convolution
|
| 145 |
+
- Global Average Pooling
|
| 146 |
+
- ReLU
|
| 147 |
+
- Res2Net Block
|
| 148 |
+
Tasks:
|
| 149 |
+
- Image Classification
|
| 150 |
+
Training Techniques:
|
| 151 |
+
- SGD with Momentum
|
| 152 |
+
- Weight Decay
|
| 153 |
+
Training Data:
|
| 154 |
+
- ImageNet
|
| 155 |
+
Training Resources: 4x Titan Xp GPUs
|
| 156 |
+
ID: res2net50_14w_8s
|
| 157 |
+
LR: 0.1
|
| 158 |
+
Epochs: 100
|
| 159 |
+
Crop Pct: '0.875'
|
| 160 |
+
Momentum: 0.9
|
| 161 |
+
Batch Size: 256
|
| 162 |
+
Image Size: '224'
|
| 163 |
+
Weight Decay: 0.0001
|
| 164 |
+
Interpolation: bilinear
|
| 165 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L196
|
| 166 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth
|
| 167 |
+
Results:
|
| 168 |
+
- Task: Image Classification
|
| 169 |
+
Dataset: ImageNet
|
| 170 |
+
Metrics:
|
| 171 |
+
Top 1 Accuracy: 78.14%
|
| 172 |
+
Top 5 Accuracy: 93.86%
|
| 173 |
+
- Name: res2net50_26w_4s
|
| 174 |
+
In Collection: Res2Net
|
| 175 |
+
Metadata:
|
| 176 |
+
FLOPs: 5499974064
|
| 177 |
+
Parameters: 25700000
|
| 178 |
+
File Size: 103110087
|
| 179 |
+
Architecture:
|
| 180 |
+
- Batch Normalization
|
| 181 |
+
- Convolution
|
| 182 |
+
- Global Average Pooling
|
| 183 |
+
- ReLU
|
| 184 |
+
- Res2Net Block
|
| 185 |
+
Tasks:
|
| 186 |
+
- Image Classification
|
| 187 |
+
Training Techniques:
|
| 188 |
+
- SGD with Momentum
|
| 189 |
+
- Weight Decay
|
| 190 |
+
Training Data:
|
| 191 |
+
- ImageNet
|
| 192 |
+
Training Resources: 4x Titan Xp GPUs
|
| 193 |
+
ID: res2net50_26w_4s
|
| 194 |
+
LR: 0.1
|
| 195 |
+
Epochs: 100
|
| 196 |
+
Crop Pct: '0.875'
|
| 197 |
+
Momentum: 0.9
|
| 198 |
+
Batch Size: 256
|
| 199 |
+
Image Size: '224'
|
| 200 |
+
Weight Decay: 0.0001
|
| 201 |
+
Interpolation: bilinear
|
| 202 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L141
|
| 203 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth
|
| 204 |
+
Results:
|
| 205 |
+
- Task: Image Classification
|
| 206 |
+
Dataset: ImageNet
|
| 207 |
+
Metrics:
|
| 208 |
+
Top 1 Accuracy: 77.99%
|
| 209 |
+
Top 5 Accuracy: 93.85%
|
| 210 |
+
- Name: res2net50_26w_6s
|
| 211 |
+
In Collection: Res2Net
|
| 212 |
+
Metadata:
|
| 213 |
+
FLOPs: 8130156528
|
| 214 |
+
Parameters: 37050000
|
| 215 |
+
File Size: 148603239
|
| 216 |
+
Architecture:
|
| 217 |
+
- Batch Normalization
|
| 218 |
+
- Convolution
|
| 219 |
+
- Global Average Pooling
|
| 220 |
+
- ReLU
|
| 221 |
+
- Res2Net Block
|
| 222 |
+
Tasks:
|
| 223 |
+
- Image Classification
|
| 224 |
+
Training Techniques:
|
| 225 |
+
- SGD with Momentum
|
| 226 |
+
- Weight Decay
|
| 227 |
+
Training Data:
|
| 228 |
+
- ImageNet
|
| 229 |
+
Training Resources: 4x Titan Xp GPUs
|
| 230 |
+
ID: res2net50_26w_6s
|
| 231 |
+
LR: 0.1
|
| 232 |
+
Epochs: 100
|
| 233 |
+
Crop Pct: '0.875'
|
| 234 |
+
Momentum: 0.9
|
| 235 |
+
Batch Size: 256
|
| 236 |
+
Image Size: '224'
|
| 237 |
+
Weight Decay: 0.0001
|
| 238 |
+
Interpolation: bilinear
|
| 239 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L163
|
| 240 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth
|
| 241 |
+
Results:
|
| 242 |
+
- Task: Image Classification
|
| 243 |
+
Dataset: ImageNet
|
| 244 |
+
Metrics:
|
| 245 |
+
Top 1 Accuracy: 78.57%
|
| 246 |
+
Top 5 Accuracy: 94.12%
|
| 247 |
+
- Name: res2net50_26w_8s
|
| 248 |
+
In Collection: Res2Net
|
| 249 |
+
Metadata:
|
| 250 |
+
FLOPs: 10760338992
|
| 251 |
+
Parameters: 48400000
|
| 252 |
+
File Size: 194085165
|
| 253 |
+
Architecture:
|
| 254 |
+
- Batch Normalization
|
| 255 |
+
- Convolution
|
| 256 |
+
- Global Average Pooling
|
| 257 |
+
- ReLU
|
| 258 |
+
- Res2Net Block
|
| 259 |
+
Tasks:
|
| 260 |
+
- Image Classification
|
| 261 |
+
Training Techniques:
|
| 262 |
+
- SGD with Momentum
|
| 263 |
+
- Weight Decay
|
| 264 |
+
Training Data:
|
| 265 |
+
- ImageNet
|
| 266 |
+
Training Resources: 4x Titan Xp GPUs
|
| 267 |
+
ID: res2net50_26w_8s
|
| 268 |
+
LR: 0.1
|
| 269 |
+
Epochs: 100
|
| 270 |
+
Crop Pct: '0.875'
|
| 271 |
+
Momentum: 0.9
|
| 272 |
+
Batch Size: 256
|
| 273 |
+
Image Size: '224'
|
| 274 |
+
Weight Decay: 0.0001
|
| 275 |
+
Interpolation: bilinear
|
| 276 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L174
|
| 277 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth
|
| 278 |
+
Results:
|
| 279 |
+
- Task: Image Classification
|
| 280 |
+
Dataset: ImageNet
|
| 281 |
+
Metrics:
|
| 282 |
+
Top 1 Accuracy: 79.19%
|
| 283 |
+
Top 5 Accuracy: 94.37%
|
| 284 |
+
- Name: res2net50_48w_2s
|
| 285 |
+
In Collection: Res2Net
|
| 286 |
+
Metadata:
|
| 287 |
+
FLOPs: 5375291520
|
| 288 |
+
Parameters: 25290000
|
| 289 |
+
File Size: 101421406
|
| 290 |
+
Architecture:
|
| 291 |
+
- Batch Normalization
|
| 292 |
+
- Convolution
|
| 293 |
+
- Global Average Pooling
|
| 294 |
+
- ReLU
|
| 295 |
+
- Res2Net Block
|
| 296 |
+
Tasks:
|
| 297 |
+
- Image Classification
|
| 298 |
+
Training Techniques:
|
| 299 |
+
- SGD with Momentum
|
| 300 |
+
- Weight Decay
|
| 301 |
+
Training Data:
|
| 302 |
+
- ImageNet
|
| 303 |
+
Training Resources: 4x Titan Xp GPUs
|
| 304 |
+
ID: res2net50_48w_2s
|
| 305 |
+
LR: 0.1
|
| 306 |
+
Epochs: 100
|
| 307 |
+
Crop Pct: '0.875'
|
| 308 |
+
Momentum: 0.9
|
| 309 |
+
Batch Size: 256
|
| 310 |
+
Image Size: '224'
|
| 311 |
+
Weight Decay: 0.0001
|
| 312 |
+
Interpolation: bilinear
|
| 313 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L185
|
| 314 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth
|
| 315 |
+
Results:
|
| 316 |
+
- Task: Image Classification
|
| 317 |
+
Dataset: ImageNet
|
| 318 |
+
Metrics:
|
| 319 |
+
Top 1 Accuracy: 77.53%
|
| 320 |
+
Top 5 Accuracy: 93.56%
|
| 321 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/resnest.md
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ResNeSt
|
| 2 |
+
|
| 3 |
+
A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: $V = \text{Concat}${$V^{1},V^{2},\cdots{V}^{K}$}. As in standard residual blocks, the final output $Y$ of our Split-Attention block is produced using a shortcut connection: $Y=V+X$, if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation $\mathcal{T}$ is applied to the shortcut connection to align the output shapes: $Y=V+\mathcal{T}(X)$. For example, $\mathcal{T}$ can be strided convolution or combined convolution-with-pooling.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('resnest101e', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `resnest101e`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('resnest101e', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{zhang2020resnest,
|
| 76 |
+
title={ResNeSt: Split-Attention Networks},
|
| 77 |
+
author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola},
|
| 78 |
+
year={2020},
|
| 79 |
+
eprint={2004.08955},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: ResNeSt
|
| 89 |
+
Paper:
|
| 90 |
+
Title: 'ResNeSt: Split-Attention Networks'
|
| 91 |
+
URL: https://paperswithcode.com/paper/resnest-split-attention-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: resnest101e
|
| 94 |
+
In Collection: ResNeSt
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 17423183648
|
| 97 |
+
Parameters: 48280000
|
| 98 |
+
File Size: 193782911
|
| 99 |
+
Architecture:
|
| 100 |
+
- 1x1 Convolution
|
| 101 |
+
- Convolution
|
| 102 |
+
- Dense Connections
|
| 103 |
+
- Global Average Pooling
|
| 104 |
+
- Max Pooling
|
| 105 |
+
- ReLU
|
| 106 |
+
- Residual Connection
|
| 107 |
+
- Softmax
|
| 108 |
+
- Split Attention
|
| 109 |
+
Tasks:
|
| 110 |
+
- Image Classification
|
| 111 |
+
Training Techniques:
|
| 112 |
+
- AutoAugment
|
| 113 |
+
- DropBlock
|
| 114 |
+
- Label Smoothing
|
| 115 |
+
- Mixup
|
| 116 |
+
- SGD with Momentum
|
| 117 |
+
- Weight Decay
|
| 118 |
+
Training Data:
|
| 119 |
+
- ImageNet
|
| 120 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 121 |
+
ID: resnest101e
|
| 122 |
+
LR: 0.1
|
| 123 |
+
Epochs: 270
|
| 124 |
+
Layers: 101
|
| 125 |
+
Dropout: 0.2
|
| 126 |
+
Crop Pct: '0.875'
|
| 127 |
+
Momentum: 0.9
|
| 128 |
+
Batch Size: 4096
|
| 129 |
+
Image Size: '256'
|
| 130 |
+
Weight Decay: 0.0001
|
| 131 |
+
Interpolation: bilinear
|
| 132 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L182
|
| 133 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth
|
| 134 |
+
Results:
|
| 135 |
+
- Task: Image Classification
|
| 136 |
+
Dataset: ImageNet
|
| 137 |
+
Metrics:
|
| 138 |
+
Top 1 Accuracy: 82.88%
|
| 139 |
+
Top 5 Accuracy: 96.31%
|
| 140 |
+
- Name: resnest14d
|
| 141 |
+
In Collection: ResNeSt
|
| 142 |
+
Metadata:
|
| 143 |
+
FLOPs: 3548594464
|
| 144 |
+
Parameters: 10610000
|
| 145 |
+
File Size: 42562639
|
| 146 |
+
Architecture:
|
| 147 |
+
- 1x1 Convolution
|
| 148 |
+
- Convolution
|
| 149 |
+
- Dense Connections
|
| 150 |
+
- Global Average Pooling
|
| 151 |
+
- Max Pooling
|
| 152 |
+
- ReLU
|
| 153 |
+
- Residual Connection
|
| 154 |
+
- Softmax
|
| 155 |
+
- Split Attention
|
| 156 |
+
Tasks:
|
| 157 |
+
- Image Classification
|
| 158 |
+
Training Techniques:
|
| 159 |
+
- AutoAugment
|
| 160 |
+
- DropBlock
|
| 161 |
+
- Label Smoothing
|
| 162 |
+
- Mixup
|
| 163 |
+
- SGD with Momentum
|
| 164 |
+
- Weight Decay
|
| 165 |
+
Training Data:
|
| 166 |
+
- ImageNet
|
| 167 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 168 |
+
ID: resnest14d
|
| 169 |
+
LR: 0.1
|
| 170 |
+
Epochs: 270
|
| 171 |
+
Layers: 14
|
| 172 |
+
Dropout: 0.2
|
| 173 |
+
Crop Pct: '0.875'
|
| 174 |
+
Momentum: 0.9
|
| 175 |
+
Batch Size: 8192
|
| 176 |
+
Image Size: '224'
|
| 177 |
+
Weight Decay: 0.0001
|
| 178 |
+
Interpolation: bilinear
|
| 179 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L148
|
| 180 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth
|
| 181 |
+
Results:
|
| 182 |
+
- Task: Image Classification
|
| 183 |
+
Dataset: ImageNet
|
| 184 |
+
Metrics:
|
| 185 |
+
Top 1 Accuracy: 75.51%
|
| 186 |
+
Top 5 Accuracy: 92.52%
|
| 187 |
+
- Name: resnest200e
|
| 188 |
+
In Collection: ResNeSt
|
| 189 |
+
Metadata:
|
| 190 |
+
FLOPs: 45954387872
|
| 191 |
+
Parameters: 70200000
|
| 192 |
+
File Size: 193782911
|
| 193 |
+
Architecture:
|
| 194 |
+
- 1x1 Convolution
|
| 195 |
+
- Convolution
|
| 196 |
+
- Dense Connections
|
| 197 |
+
- Global Average Pooling
|
| 198 |
+
- Max Pooling
|
| 199 |
+
- ReLU
|
| 200 |
+
- Residual Connection
|
| 201 |
+
- Softmax
|
| 202 |
+
- Split Attention
|
| 203 |
+
Tasks:
|
| 204 |
+
- Image Classification
|
| 205 |
+
Training Techniques:
|
| 206 |
+
- AutoAugment
|
| 207 |
+
- DropBlock
|
| 208 |
+
- Label Smoothing
|
| 209 |
+
- Mixup
|
| 210 |
+
- SGD with Momentum
|
| 211 |
+
- Weight Decay
|
| 212 |
+
Training Data:
|
| 213 |
+
- ImageNet
|
| 214 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 215 |
+
ID: resnest200e
|
| 216 |
+
LR: 0.1
|
| 217 |
+
Epochs: 270
|
| 218 |
+
Layers: 200
|
| 219 |
+
Dropout: 0.2
|
| 220 |
+
Crop Pct: '0.909'
|
| 221 |
+
Momentum: 0.9
|
| 222 |
+
Batch Size: 2048
|
| 223 |
+
Image Size: '320'
|
| 224 |
+
Weight Decay: 0.0001
|
| 225 |
+
Interpolation: bicubic
|
| 226 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L194
|
| 227 |
+
    Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth
|
| 228 |
+
Results:
|
| 229 |
+
- Task: Image Classification
|
| 230 |
+
Dataset: ImageNet
|
| 231 |
+
Metrics:
|
| 232 |
+
Top 1 Accuracy: 83.85%
|
| 233 |
+
Top 5 Accuracy: 96.89%
|
| 234 |
+
- Name: resnest269e
|
| 235 |
+
In Collection: ResNeSt
|
| 236 |
+
Metadata:
|
| 237 |
+
FLOPs: 100830307104
|
| 238 |
+
Parameters: 110930000
|
| 239 |
+
File Size: 445402691
|
| 240 |
+
Architecture:
|
| 241 |
+
- 1x1 Convolution
|
| 242 |
+
- Convolution
|
| 243 |
+
- Dense Connections
|
| 244 |
+
- Global Average Pooling
|
| 245 |
+
- Max Pooling
|
| 246 |
+
- ReLU
|
| 247 |
+
- Residual Connection
|
| 248 |
+
- Softmax
|
| 249 |
+
- Split Attention
|
| 250 |
+
Tasks:
|
| 251 |
+
- Image Classification
|
| 252 |
+
Training Techniques:
|
| 253 |
+
- AutoAugment
|
| 254 |
+
- DropBlock
|
| 255 |
+
- Label Smoothing
|
| 256 |
+
- Mixup
|
| 257 |
+
- SGD with Momentum
|
| 258 |
+
- Weight Decay
|
| 259 |
+
Training Data:
|
| 260 |
+
- ImageNet
|
| 261 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 262 |
+
ID: resnest269e
|
| 263 |
+
LR: 0.1
|
| 264 |
+
Epochs: 270
|
| 265 |
+
Layers: 269
|
| 266 |
+
Dropout: 0.2
|
| 267 |
+
Crop Pct: '0.928'
|
| 268 |
+
Momentum: 0.9
|
| 269 |
+
Batch Size: 2048
|
| 270 |
+
Image Size: '416'
|
| 271 |
+
Weight Decay: 0.0001
|
| 272 |
+
Interpolation: bicubic
|
| 273 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L206
|
| 274 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth
|
| 275 |
+
Results:
|
| 276 |
+
- Task: Image Classification
|
| 277 |
+
Dataset: ImageNet
|
| 278 |
+
Metrics:
|
| 279 |
+
Top 1 Accuracy: 84.53%
|
| 280 |
+
Top 5 Accuracy: 96.99%
|
| 281 |
+
- Name: resnest26d
|
| 282 |
+
In Collection: ResNeSt
|
| 283 |
+
Metadata:
|
| 284 |
+
FLOPs: 4678918720
|
| 285 |
+
Parameters: 17070000
|
| 286 |
+
File Size: 68470242
|
| 287 |
+
Architecture:
|
| 288 |
+
- 1x1 Convolution
|
| 289 |
+
- Convolution
|
| 290 |
+
- Dense Connections
|
| 291 |
+
- Global Average Pooling
|
| 292 |
+
- Max Pooling
|
| 293 |
+
- ReLU
|
| 294 |
+
- Residual Connection
|
| 295 |
+
- Softmax
|
| 296 |
+
- Split Attention
|
| 297 |
+
Tasks:
|
| 298 |
+
- Image Classification
|
| 299 |
+
Training Techniques:
|
| 300 |
+
- AutoAugment
|
| 301 |
+
- DropBlock
|
| 302 |
+
- Label Smoothing
|
| 303 |
+
- Mixup
|
| 304 |
+
- SGD with Momentum
|
| 305 |
+
- Weight Decay
|
| 306 |
+
Training Data:
|
| 307 |
+
- ImageNet
|
| 308 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 309 |
+
ID: resnest26d
|
| 310 |
+
LR: 0.1
|
| 311 |
+
Epochs: 270
|
| 312 |
+
Layers: 26
|
| 313 |
+
Dropout: 0.2
|
| 314 |
+
Crop Pct: '0.875'
|
| 315 |
+
Momentum: 0.9
|
| 316 |
+
Batch Size: 8192
|
| 317 |
+
Image Size: '224'
|
| 318 |
+
Weight Decay: 0.0001
|
| 319 |
+
Interpolation: bilinear
|
| 320 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L159
|
| 321 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth
|
| 322 |
+
Results:
|
| 323 |
+
- Task: Image Classification
|
| 324 |
+
Dataset: ImageNet
|
| 325 |
+
Metrics:
|
| 326 |
+
Top 1 Accuracy: 78.48%
|
| 327 |
+
Top 5 Accuracy: 94.3%
|
| 328 |
+
- Name: resnest50d
|
| 329 |
+
In Collection: ResNeSt
|
| 330 |
+
Metadata:
|
| 331 |
+
FLOPs: 6937106336
|
| 332 |
+
Parameters: 27480000
|
| 333 |
+
File Size: 110273258
|
| 334 |
+
Architecture:
|
| 335 |
+
- 1x1 Convolution
|
| 336 |
+
- Convolution
|
| 337 |
+
- Dense Connections
|
| 338 |
+
- Global Average Pooling
|
| 339 |
+
- Max Pooling
|
| 340 |
+
- ReLU
|
| 341 |
+
- Residual Connection
|
| 342 |
+
- Softmax
|
| 343 |
+
- Split Attention
|
| 344 |
+
Tasks:
|
| 345 |
+
- Image Classification
|
| 346 |
+
Training Techniques:
|
| 347 |
+
- AutoAugment
|
| 348 |
+
- DropBlock
|
| 349 |
+
- Label Smoothing
|
| 350 |
+
- Mixup
|
| 351 |
+
- SGD with Momentum
|
| 352 |
+
- Weight Decay
|
| 353 |
+
Training Data:
|
| 354 |
+
- ImageNet
|
| 355 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 356 |
+
ID: resnest50d
|
| 357 |
+
LR: 0.1
|
| 358 |
+
Epochs: 270
|
| 359 |
+
Layers: 50
|
| 360 |
+
Dropout: 0.2
|
| 361 |
+
Crop Pct: '0.875'
|
| 362 |
+
Momentum: 0.9
|
| 363 |
+
Batch Size: 8192
|
| 364 |
+
Image Size: '224'
|
| 365 |
+
Weight Decay: 0.0001
|
| 366 |
+
Interpolation: bilinear
|
| 367 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L170
|
| 368 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth
|
| 369 |
+
Results:
|
| 370 |
+
- Task: Image Classification
|
| 371 |
+
Dataset: ImageNet
|
| 372 |
+
Metrics:
|
| 373 |
+
Top 1 Accuracy: 80.96%
|
| 374 |
+
Top 5 Accuracy: 95.38%
|
| 375 |
+
- Name: resnest50d_1s4x24d
|
| 376 |
+
In Collection: ResNeSt
|
| 377 |
+
Metadata:
|
| 378 |
+
FLOPs: 5686764544
|
| 379 |
+
Parameters: 25680000
|
| 380 |
+
File Size: 103045531
|
| 381 |
+
Architecture:
|
| 382 |
+
- 1x1 Convolution
|
| 383 |
+
- Convolution
|
| 384 |
+
- Dense Connections
|
| 385 |
+
- Global Average Pooling
|
| 386 |
+
- Max Pooling
|
| 387 |
+
- ReLU
|
| 388 |
+
- Residual Connection
|
| 389 |
+
- Softmax
|
| 390 |
+
- Split Attention
|
| 391 |
+
Tasks:
|
| 392 |
+
- Image Classification
|
| 393 |
+
Training Techniques:
|
| 394 |
+
- AutoAugment
|
| 395 |
+
- DropBlock
|
| 396 |
+
- Label Smoothing
|
| 397 |
+
- Mixup
|
| 398 |
+
- SGD with Momentum
|
| 399 |
+
- Weight Decay
|
| 400 |
+
Training Data:
|
| 401 |
+
- ImageNet
|
| 402 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 403 |
+
ID: resnest50d_1s4x24d
|
| 404 |
+
LR: 0.1
|
| 405 |
+
Epochs: 270
|
| 406 |
+
Layers: 50
|
| 407 |
+
Dropout: 0.2
|
| 408 |
+
Crop Pct: '0.875'
|
| 409 |
+
Momentum: 0.9
|
| 410 |
+
Batch Size: 8192
|
| 411 |
+
Image Size: '224'
|
| 412 |
+
Weight Decay: 0.0001
|
| 413 |
+
Interpolation: bicubic
|
| 414 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L229
|
| 415 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth
|
| 416 |
+
Results:
|
| 417 |
+
- Task: Image Classification
|
| 418 |
+
Dataset: ImageNet
|
| 419 |
+
Metrics:
|
| 420 |
+
Top 1 Accuracy: 81.0%
|
| 421 |
+
Top 5 Accuracy: 95.33%
|
| 422 |
+
- Name: resnest50d_4s2x40d
|
| 423 |
+
In Collection: ResNeSt
|
| 424 |
+
Metadata:
|
| 425 |
+
FLOPs: 5657064720
|
| 426 |
+
Parameters: 30420000
|
| 427 |
+
File Size: 122133282
|
| 428 |
+
Architecture:
|
| 429 |
+
- 1x1 Convolution
|
| 430 |
+
- Convolution
|
| 431 |
+
- Dense Connections
|
| 432 |
+
- Global Average Pooling
|
| 433 |
+
- Max Pooling
|
| 434 |
+
- ReLU
|
| 435 |
+
- Residual Connection
|
| 436 |
+
- Softmax
|
| 437 |
+
- Split Attention
|
| 438 |
+
Tasks:
|
| 439 |
+
- Image Classification
|
| 440 |
+
Training Techniques:
|
| 441 |
+
- AutoAugment
|
| 442 |
+
- DropBlock
|
| 443 |
+
- Label Smoothing
|
| 444 |
+
- Mixup
|
| 445 |
+
- SGD with Momentum
|
| 446 |
+
- Weight Decay
|
| 447 |
+
Training Data:
|
| 448 |
+
- ImageNet
|
| 449 |
+
Training Resources: 64x NVIDIA V100 GPUs
|
| 450 |
+
ID: resnest50d_4s2x40d
|
| 451 |
+
LR: 0.1
|
| 452 |
+
Epochs: 270
|
| 453 |
+
Layers: 50
|
| 454 |
+
Dropout: 0.2
|
| 455 |
+
Crop Pct: '0.875'
|
| 456 |
+
Momentum: 0.9
|
| 457 |
+
Batch Size: 8192
|
| 458 |
+
Image Size: '224'
|
| 459 |
+
Weight Decay: 0.0001
|
| 460 |
+
Interpolation: bicubic
|
| 461 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L218
|
| 462 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth
|
| 463 |
+
Results:
|
| 464 |
+
- Task: Image Classification
|
| 465 |
+
Dataset: ImageNet
|
| 466 |
+
Metrics:
|
| 467 |
+
Top 1 Accuracy: 81.11%
|
| 468 |
+
Top 5 Accuracy: 95.55%
|
| 469 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/resnet-d.md
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ResNet-D
|
| 2 |
+
|
| 3 |
+
**ResNet-D** is a modification on the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1×1 convolution](https://paperswithcode.com/method/1x1-convolution) for the downsampling block ignores 3/4 of input feature maps, so this is modified so no information will be ignored.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('resnet101d', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `resnet101d`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('resnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{he2018bag,
|
| 76 |
+
title={Bag of Tricks for Image Classification with Convolutional Neural Networks},
|
| 77 |
+
author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li},
|
| 78 |
+
year={2018},
|
| 79 |
+
eprint={1812.01187},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: ResNet-D
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Bag of Tricks for Image Classification with Convolutional Neural Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/bag-of-tricks-for-image-classification-with
|
| 92 |
+
Models:
|
| 93 |
+
- Name: resnet101d
|
| 94 |
+
In Collection: ResNet-D
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 13805639680
|
| 97 |
+
Parameters: 44570000
|
| 98 |
+
File Size: 178791263
|
| 99 |
+
Architecture:
|
| 100 |
+
- 1x1 Convolution
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Bottleneck Residual Block
|
| 103 |
+
- Convolution
|
| 104 |
+
- Global Average Pooling
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- ReLU
|
| 107 |
+
- Residual Block
|
| 108 |
+
- Residual Connection
|
| 109 |
+
- Softmax
|
| 110 |
+
Tasks:
|
| 111 |
+
- Image Classification
|
| 112 |
+
Training Data:
|
| 113 |
+
- ImageNet
|
| 114 |
+
ID: resnet101d
|
| 115 |
+
Crop Pct: '0.94'
|
| 116 |
+
Image Size: '256'
|
| 117 |
+
Interpolation: bicubic
|
| 118 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L716
|
| 119 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth
|
| 120 |
+
Results:
|
| 121 |
+
- Task: Image Classification
|
| 122 |
+
Dataset: ImageNet
|
| 123 |
+
Metrics:
|
| 124 |
+
Top 1 Accuracy: 82.31%
|
| 125 |
+
Top 5 Accuracy: 96.06%
|
| 126 |
+
- Name: resnet152d
|
| 127 |
+
In Collection: ResNet-D
|
| 128 |
+
Metadata:
|
| 129 |
+
FLOPs: 20155275264
|
| 130 |
+
Parameters: 60210000
|
| 131 |
+
File Size: 241596837
|
| 132 |
+
Architecture:
|
| 133 |
+
- 1x1 Convolution
|
| 134 |
+
- Batch Normalization
|
| 135 |
+
- Bottleneck Residual Block
|
| 136 |
+
- Convolution
|
| 137 |
+
- Global Average Pooling
|
| 138 |
+
- Max Pooling
|
| 139 |
+
- ReLU
|
| 140 |
+
- Residual Block
|
| 141 |
+
- Residual Connection
|
| 142 |
+
- Softmax
|
| 143 |
+
Tasks:
|
| 144 |
+
- Image Classification
|
| 145 |
+
Training Data:
|
| 146 |
+
- ImageNet
|
| 147 |
+
ID: resnet152d
|
| 148 |
+
Crop Pct: '0.94'
|
| 149 |
+
Image Size: '256'
|
| 150 |
+
Interpolation: bicubic
|
| 151 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L724
|
| 152 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth
|
| 153 |
+
Results:
|
| 154 |
+
- Task: Image Classification
|
| 155 |
+
Dataset: ImageNet
|
| 156 |
+
Metrics:
|
| 157 |
+
Top 1 Accuracy: 83.13%
|
| 158 |
+
Top 5 Accuracy: 96.35%
|
| 159 |
+
- Name: resnet18d
|
| 160 |
+
In Collection: ResNet-D
|
| 161 |
+
Metadata:
|
| 162 |
+
FLOPs: 2645205760
|
| 163 |
+
Parameters: 11710000
|
| 164 |
+
File Size: 46893231
|
| 165 |
+
Architecture:
|
| 166 |
+
- 1x1 Convolution
|
| 167 |
+
- Batch Normalization
|
| 168 |
+
- Bottleneck Residual Block
|
| 169 |
+
- Convolution
|
| 170 |
+
- Global Average Pooling
|
| 171 |
+
- Max Pooling
|
| 172 |
+
- ReLU
|
| 173 |
+
- Residual Block
|
| 174 |
+
- Residual Connection
|
| 175 |
+
- Softmax
|
| 176 |
+
Tasks:
|
| 177 |
+
- Image Classification
|
| 178 |
+
Training Data:
|
| 179 |
+
- ImageNet
|
| 180 |
+
ID: resnet18d
|
| 181 |
+
Crop Pct: '0.875'
|
| 182 |
+
Image Size: '224'
|
| 183 |
+
Interpolation: bicubic
|
| 184 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L649
|
| 185 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth
|
| 186 |
+
Results:
|
| 187 |
+
- Task: Image Classification
|
| 188 |
+
Dataset: ImageNet
|
| 189 |
+
Metrics:
|
| 190 |
+
Top 1 Accuracy: 72.27%
|
| 191 |
+
Top 5 Accuracy: 90.69%
|
| 192 |
+
- Name: resnet200d
|
| 193 |
+
In Collection: ResNet-D
|
| 194 |
+
Metadata:
|
| 195 |
+
FLOPs: 26034378752
|
| 196 |
+
Parameters: 64690000
|
| 197 |
+
File Size: 259662933
|
| 198 |
+
Architecture:
|
| 199 |
+
- 1x1 Convolution
|
| 200 |
+
- Batch Normalization
|
| 201 |
+
- Bottleneck Residual Block
|
| 202 |
+
- Convolution
|
| 203 |
+
- Global Average Pooling
|
| 204 |
+
- Max Pooling
|
| 205 |
+
- ReLU
|
| 206 |
+
- Residual Block
|
| 207 |
+
- Residual Connection
|
| 208 |
+
- Softmax
|
| 209 |
+
Tasks:
|
| 210 |
+
- Image Classification
|
| 211 |
+
Training Data:
|
| 212 |
+
- ImageNet
|
| 213 |
+
ID: resnet200d
|
| 214 |
+
Crop Pct: '0.94'
|
| 215 |
+
Image Size: '256'
|
| 216 |
+
Interpolation: bicubic
|
| 217 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L749
|
| 218 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth
|
| 219 |
+
Results:
|
| 220 |
+
- Task: Image Classification
|
| 221 |
+
Dataset: ImageNet
|
| 222 |
+
Metrics:
|
| 223 |
+
Top 1 Accuracy: 83.24%
|
| 224 |
+
Top 5 Accuracy: 96.49%
|
| 225 |
+
- Name: resnet26d
|
| 226 |
+
In Collection: ResNet-D
|
| 227 |
+
Metadata:
|
| 228 |
+
FLOPs: 3335276032
|
| 229 |
+
Parameters: 16010000
|
| 230 |
+
File Size: 64209122
|
| 231 |
+
Architecture:
|
| 232 |
+
- 1x1 Convolution
|
| 233 |
+
- Batch Normalization
|
| 234 |
+
- Bottleneck Residual Block
|
| 235 |
+
- Convolution
|
| 236 |
+
- Global Average Pooling
|
| 237 |
+
- Max Pooling
|
| 238 |
+
- ReLU
|
| 239 |
+
- Residual Block
|
| 240 |
+
- Residual Connection
|
| 241 |
+
- Softmax
|
| 242 |
+
Tasks:
|
| 243 |
+
- Image Classification
|
| 244 |
+
Training Data:
|
| 245 |
+
- ImageNet
|
| 246 |
+
ID: resnet26d
|
| 247 |
+
Crop Pct: '0.875'
|
| 248 |
+
Image Size: '224'
|
| 249 |
+
Interpolation: bicubic
|
| 250 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L683
|
| 251 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth
|
| 252 |
+
Results:
|
| 253 |
+
- Task: Image Classification
|
| 254 |
+
Dataset: ImageNet
|
| 255 |
+
Metrics:
|
| 256 |
+
Top 1 Accuracy: 76.69%
|
| 257 |
+
Top 5 Accuracy: 93.15%
|
| 258 |
+
- Name: resnet34d
|
| 259 |
+
In Collection: ResNet-D
|
| 260 |
+
Metadata:
|
| 261 |
+
FLOPs: 5026601728
|
| 262 |
+
Parameters: 21820000
|
| 263 |
+
File Size: 87369807
|
| 264 |
+
Architecture:
|
| 265 |
+
- 1x1 Convolution
|
| 266 |
+
- Batch Normalization
|
| 267 |
+
- Bottleneck Residual Block
|
| 268 |
+
- Convolution
|
| 269 |
+
- Global Average Pooling
|
| 270 |
+
- Max Pooling
|
| 271 |
+
- ReLU
|
| 272 |
+
- Residual Block
|
| 273 |
+
- Residual Connection
|
| 274 |
+
- Softmax
|
| 275 |
+
Tasks:
|
| 276 |
+
- Image Classification
|
| 277 |
+
Training Data:
|
| 278 |
+
- ImageNet
|
| 279 |
+
ID: resnet34d
|
| 280 |
+
Crop Pct: '0.875'
|
| 281 |
+
Image Size: '224'
|
| 282 |
+
Interpolation: bicubic
|
| 283 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L666
|
| 284 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth
|
| 285 |
+
Results:
|
| 286 |
+
- Task: Image Classification
|
| 287 |
+
Dataset: ImageNet
|
| 288 |
+
Metrics:
|
| 289 |
+
Top 1 Accuracy: 77.11%
|
| 290 |
+
Top 5 Accuracy: 93.38%
|
| 291 |
+
- Name: resnet50d
|
| 292 |
+
In Collection: ResNet-D
|
| 293 |
+
Metadata:
|
| 294 |
+
FLOPs: 5591002624
|
| 295 |
+
Parameters: 25580000
|
| 296 |
+
File Size: 102567109
|
| 297 |
+
Architecture:
|
| 298 |
+
- 1x1 Convolution
|
| 299 |
+
- Batch Normalization
|
| 300 |
+
- Bottleneck Residual Block
|
| 301 |
+
- Convolution
|
| 302 |
+
- Global Average Pooling
|
| 303 |
+
- Max Pooling
|
| 304 |
+
- ReLU
|
| 305 |
+
- Residual Block
|
| 306 |
+
- Residual Connection
|
| 307 |
+
- Softmax
|
| 308 |
+
Tasks:
|
| 309 |
+
- Image Classification
|
| 310 |
+
Training Data:
|
| 311 |
+
- ImageNet
|
| 312 |
+
ID: resnet50d
|
| 313 |
+
Crop Pct: '0.875'
|
| 314 |
+
Image Size: '224'
|
| 315 |
+
Interpolation: bicubic
|
| 316 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L699
|
| 317 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth
|
| 318 |
+
Results:
|
| 319 |
+
- Task: Image Classification
|
| 320 |
+
Dataset: ImageNet
|
| 321 |
+
Metrics:
|
| 322 |
+
Top 1 Accuracy: 80.55%
|
| 323 |
+
Top 5 Accuracy: 95.16%
|
| 324 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/resnet.md
ADDED
|
@@ -0,0 +1,439 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ResNet
|
| 2 |
+
|
| 3 |
+
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('resnet18', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{DBLP:journals/corr/HeZRS15,
|
| 76 |
+
author = {Kaiming He and
|
| 77 |
+
Xiangyu Zhang and
|
| 78 |
+
Shaoqing Ren and
|
| 79 |
+
Jian Sun},
|
| 80 |
+
title = {Deep Residual Learning for Image Recognition},
|
| 81 |
+
journal = {CoRR},
|
| 82 |
+
volume = {abs/1512.03385},
|
| 83 |
+
year = {2015},
|
| 84 |
+
url = {http://arxiv.org/abs/1512.03385},
|
| 85 |
+
archivePrefix = {arXiv},
|
| 86 |
+
eprint = {1512.03385},
|
| 87 |
+
timestamp = {Wed, 17 Apr 2019 17:23:45 +0200},
|
| 88 |
+
biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib},
|
| 89 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 90 |
+
}
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
<!--
|
| 94 |
+
Type: model-index
|
| 95 |
+
Collections:
|
| 96 |
+
- Name: ResNet
|
| 97 |
+
Paper:
|
| 98 |
+
Title: Deep Residual Learning for Image Recognition
|
| 99 |
+
URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
|
| 100 |
+
Models:
|
| 101 |
+
- Name: resnet18
|
| 102 |
+
In Collection: ResNet
|
| 103 |
+
Metadata:
|
| 104 |
+
FLOPs: 2337073152
|
| 105 |
+
Parameters: 11690000
|
| 106 |
+
File Size: 46827520
|
| 107 |
+
Architecture:
|
| 108 |
+
- 1x1 Convolution
|
| 109 |
+
- Batch Normalization
|
| 110 |
+
- Bottleneck Residual Block
|
| 111 |
+
- Convolution
|
| 112 |
+
- Global Average Pooling
|
| 113 |
+
- Max Pooling
|
| 114 |
+
- ReLU
|
| 115 |
+
- Residual Block
|
| 116 |
+
- Residual Connection
|
| 117 |
+
- Softmax
|
| 118 |
+
Tasks:
|
| 119 |
+
- Image Classification
|
| 120 |
+
Training Data:
|
| 121 |
+
- ImageNet
|
| 122 |
+
ID: resnet18
|
| 123 |
+
Crop Pct: '0.875'
|
| 124 |
+
Image Size: '224'
|
| 125 |
+
Interpolation: bilinear
|
| 126 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641
|
| 127 |
+
Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth
|
| 128 |
+
Results:
|
| 129 |
+
- Task: Image Classification
|
| 130 |
+
Dataset: ImageNet
|
| 131 |
+
Metrics:
|
| 132 |
+
Top 1 Accuracy: 69.74%
|
| 133 |
+
Top 5 Accuracy: 89.09%
|
| 134 |
+
- Name: resnet26
|
| 135 |
+
In Collection: ResNet
|
| 136 |
+
Metadata:
|
| 137 |
+
FLOPs: 3026804736
|
| 138 |
+
Parameters: 16000000
|
| 139 |
+
File Size: 64129972
|
| 140 |
+
Architecture:
|
| 141 |
+
- 1x1 Convolution
|
| 142 |
+
- Batch Normalization
|
| 143 |
+
- Bottleneck Residual Block
|
| 144 |
+
- Convolution
|
| 145 |
+
- Global Average Pooling
|
| 146 |
+
- Max Pooling
|
| 147 |
+
- ReLU
|
| 148 |
+
- Residual Block
|
| 149 |
+
- Residual Connection
|
| 150 |
+
- Softmax
|
| 151 |
+
Tasks:
|
| 152 |
+
- Image Classification
|
| 153 |
+
Training Data:
|
| 154 |
+
- ImageNet
|
| 155 |
+
ID: resnet26
|
| 156 |
+
Crop Pct: '0.875'
|
| 157 |
+
Image Size: '224'
|
| 158 |
+
Interpolation: bicubic
|
| 159 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675
|
| 160 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth
|
| 161 |
+
Results:
|
| 162 |
+
- Task: Image Classification
|
| 163 |
+
Dataset: ImageNet
|
| 164 |
+
Metrics:
|
| 165 |
+
Top 1 Accuracy: 75.29%
|
| 166 |
+
Top 5 Accuracy: 92.57%
|
| 167 |
+
- Name: resnet34
|
| 168 |
+
In Collection: ResNet
|
| 169 |
+
Metadata:
|
| 170 |
+
FLOPs: 4718469120
|
| 171 |
+
Parameters: 21800000
|
| 172 |
+
File Size: 87290831
|
| 173 |
+
Architecture:
|
| 174 |
+
- 1x1 Convolution
|
| 175 |
+
- Batch Normalization
|
| 176 |
+
- Bottleneck Residual Block
|
| 177 |
+
- Convolution
|
| 178 |
+
- Global Average Pooling
|
| 179 |
+
- Max Pooling
|
| 180 |
+
- ReLU
|
| 181 |
+
- Residual Block
|
| 182 |
+
- Residual Connection
|
| 183 |
+
- Softmax
|
| 184 |
+
Tasks:
|
| 185 |
+
- Image Classification
|
| 186 |
+
Training Data:
|
| 187 |
+
- ImageNet
|
| 188 |
+
ID: resnet34
|
| 189 |
+
Crop Pct: '0.875'
|
| 190 |
+
Image Size: '224'
|
| 191 |
+
Interpolation: bilinear
|
| 192 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658
|
| 193 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth
|
| 194 |
+
Results:
|
| 195 |
+
- Task: Image Classification
|
| 196 |
+
Dataset: ImageNet
|
| 197 |
+
Metrics:
|
| 198 |
+
Top 1 Accuracy: 75.11%
|
| 199 |
+
Top 5 Accuracy: 92.28%
|
| 200 |
+
- Name: resnet50
|
| 201 |
+
In Collection: ResNet
|
| 202 |
+
Metadata:
|
| 203 |
+
FLOPs: 5282531328
|
| 204 |
+
Parameters: 25560000
|
| 205 |
+
File Size: 102488165
|
| 206 |
+
Architecture:
|
| 207 |
+
- 1x1 Convolution
|
| 208 |
+
- Batch Normalization
|
| 209 |
+
- Bottleneck Residual Block
|
| 210 |
+
- Convolution
|
| 211 |
+
- Global Average Pooling
|
| 212 |
+
- Max Pooling
|
| 213 |
+
- ReLU
|
| 214 |
+
- Residual Block
|
| 215 |
+
- Residual Connection
|
| 216 |
+
- Softmax
|
| 217 |
+
Tasks:
|
| 218 |
+
- Image Classification
|
| 219 |
+
Training Data:
|
| 220 |
+
- ImageNet
|
| 221 |
+
ID: resnet50
|
| 222 |
+
Crop Pct: '0.875'
|
| 223 |
+
Image Size: '224'
|
| 224 |
+
Interpolation: bicubic
|
| 225 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691
|
| 226 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth
|
| 227 |
+
Results:
|
| 228 |
+
- Task: Image Classification
|
| 229 |
+
Dataset: ImageNet
|
| 230 |
+
Metrics:
|
| 231 |
+
Top 1 Accuracy: 79.04%
|
| 232 |
+
Top 5 Accuracy: 94.39%
|
| 233 |
+
- Name: resnetblur50
|
| 234 |
+
In Collection: ResNet
|
| 235 |
+
Metadata:
|
| 236 |
+
FLOPs: 6621606912
|
| 237 |
+
Parameters: 25560000
|
| 238 |
+
File Size: 102488165
|
| 239 |
+
Architecture:
|
| 240 |
+
- 1x1 Convolution
|
| 241 |
+
- Batch Normalization
|
| 242 |
+
- Blur Pooling
|
| 243 |
+
- Bottleneck Residual Block
|
| 244 |
+
- Convolution
|
| 245 |
+
- Global Average Pooling
|
| 246 |
+
- Max Pooling
|
| 247 |
+
- ReLU
|
| 248 |
+
- Residual Block
|
| 249 |
+
- Residual Connection
|
| 250 |
+
- Softmax
|
| 251 |
+
Tasks:
|
| 252 |
+
- Image Classification
|
| 253 |
+
Training Data:
|
| 254 |
+
- ImageNet
|
| 255 |
+
ID: resnetblur50
|
| 256 |
+
Crop Pct: '0.875'
|
| 257 |
+
Image Size: '224'
|
| 258 |
+
Interpolation: bicubic
|
| 259 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160
|
| 260 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth
|
| 261 |
+
Results:
|
| 262 |
+
- Task: Image Classification
|
| 263 |
+
Dataset: ImageNet
|
| 264 |
+
Metrics:
|
| 265 |
+
Top 1 Accuracy: 79.29%
|
| 266 |
+
Top 5 Accuracy: 94.64%
|
| 267 |
+
- Name: tv_resnet101
|
| 268 |
+
In Collection: ResNet
|
| 269 |
+
Metadata:
|
| 270 |
+
FLOPs: 10068547584
|
| 271 |
+
Parameters: 44550000
|
| 272 |
+
File Size: 178728960
|
| 273 |
+
Architecture:
|
| 274 |
+
- 1x1 Convolution
|
| 275 |
+
- Batch Normalization
|
| 276 |
+
- Bottleneck Residual Block
|
| 277 |
+
- Convolution
|
| 278 |
+
- Global Average Pooling
|
| 279 |
+
- Max Pooling
|
| 280 |
+
- ReLU
|
| 281 |
+
- Residual Block
|
| 282 |
+
- Residual Connection
|
| 283 |
+
- Softmax
|
| 284 |
+
Tasks:
|
| 285 |
+
- Image Classification
|
| 286 |
+
Training Techniques:
|
| 287 |
+
- SGD with Momentum
|
| 288 |
+
- Weight Decay
|
| 289 |
+
Training Data:
|
| 290 |
+
- ImageNet
|
| 291 |
+
ID: tv_resnet101
|
| 292 |
+
LR: 0.1
|
| 293 |
+
Epochs: 90
|
| 294 |
+
Crop Pct: '0.875'
|
| 295 |
+
LR Gamma: 0.1
|
| 296 |
+
Momentum: 0.9
|
| 297 |
+
Batch Size: 32
|
| 298 |
+
Image Size: '224'
|
| 299 |
+
LR Step Size: 30
|
| 300 |
+
Weight Decay: 0.0001
|
| 301 |
+
Interpolation: bilinear
|
| 302 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761
|
| 303 |
+
Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
|
| 304 |
+
Results:
|
| 305 |
+
- Task: Image Classification
|
| 306 |
+
Dataset: ImageNet
|
| 307 |
+
Metrics:
|
| 308 |
+
Top 1 Accuracy: 77.37%
|
| 309 |
+
Top 5 Accuracy: 93.56%
|
| 310 |
+
- Name: tv_resnet152
|
| 311 |
+
In Collection: ResNet
|
| 312 |
+
Metadata:
|
| 313 |
+
FLOPs: 14857660416
|
| 314 |
+
Parameters: 60190000
|
| 315 |
+
File Size: 241530880
|
| 316 |
+
Architecture:
|
| 317 |
+
- 1x1 Convolution
|
| 318 |
+
- Batch Normalization
|
| 319 |
+
- Bottleneck Residual Block
|
| 320 |
+
- Convolution
|
| 321 |
+
- Global Average Pooling
|
| 322 |
+
- Max Pooling
|
| 323 |
+
- ReLU
|
| 324 |
+
- Residual Block
|
| 325 |
+
- Residual Connection
|
| 326 |
+
- Softmax
|
| 327 |
+
Tasks:
|
| 328 |
+
- Image Classification
|
| 329 |
+
Training Techniques:
|
| 330 |
+
- SGD with Momentum
|
| 331 |
+
- Weight Decay
|
| 332 |
+
Training Data:
|
| 333 |
+
- ImageNet
|
| 334 |
+
ID: tv_resnet152
|
| 335 |
+
LR: 0.1
|
| 336 |
+
Epochs: 90
|
| 337 |
+
Crop Pct: '0.875'
|
| 338 |
+
LR Gamma: 0.1
|
| 339 |
+
Momentum: 0.9
|
| 340 |
+
Batch Size: 32
|
| 341 |
+
Image Size: '224'
|
| 342 |
+
LR Step Size: 30
|
| 343 |
+
Weight Decay: 0.0001
|
| 344 |
+
Interpolation: bilinear
|
| 345 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769
|
| 346 |
+
Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth
|
| 347 |
+
Results:
|
| 348 |
+
- Task: Image Classification
|
| 349 |
+
Dataset: ImageNet
|
| 350 |
+
Metrics:
|
| 351 |
+
Top 1 Accuracy: 78.32%
|
| 352 |
+
Top 5 Accuracy: 94.05%
|
| 353 |
+
- Name: tv_resnet34
|
| 354 |
+
In Collection: ResNet
|
| 355 |
+
Metadata:
|
| 356 |
+
FLOPs: 4718469120
|
| 357 |
+
Parameters: 21800000
|
| 358 |
+
File Size: 87306240
|
| 359 |
+
Architecture:
|
| 360 |
+
- 1x1 Convolution
|
| 361 |
+
- Batch Normalization
|
| 362 |
+
- Bottleneck Residual Block
|
| 363 |
+
- Convolution
|
| 364 |
+
- Global Average Pooling
|
| 365 |
+
- Max Pooling
|
| 366 |
+
- ReLU
|
| 367 |
+
- Residual Block
|
| 368 |
+
- Residual Connection
|
| 369 |
+
- Softmax
|
| 370 |
+
Tasks:
|
| 371 |
+
- Image Classification
|
| 372 |
+
Training Techniques:
|
| 373 |
+
- SGD with Momentum
|
| 374 |
+
- Weight Decay
|
| 375 |
+
Training Data:
|
| 376 |
+
- ImageNet
|
| 377 |
+
ID: tv_resnet34
|
| 378 |
+
LR: 0.1
|
| 379 |
+
Epochs: 90
|
| 380 |
+
Crop Pct: '0.875'
|
| 381 |
+
LR Gamma: 0.1
|
| 382 |
+
Momentum: 0.9
|
| 383 |
+
Batch Size: 32
|
| 384 |
+
Image Size: '224'
|
| 385 |
+
LR Step Size: 30
|
| 386 |
+
Weight Decay: 0.0001
|
| 387 |
+
Interpolation: bilinear
|
| 388 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745
|
| 389 |
+
Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth
|
| 390 |
+
Results:
|
| 391 |
+
- Task: Image Classification
|
| 392 |
+
Dataset: ImageNet
|
| 393 |
+
Metrics:
|
| 394 |
+
Top 1 Accuracy: 73.3%
|
| 395 |
+
Top 5 Accuracy: 91.42%
|
| 396 |
+
- Name: tv_resnet50
|
| 397 |
+
In Collection: ResNet
|
| 398 |
+
Metadata:
|
| 399 |
+
FLOPs: 5282531328
|
| 400 |
+
Parameters: 25560000
|
| 401 |
+
File Size: 102502400
|
| 402 |
+
Architecture:
|
| 403 |
+
- 1x1 Convolution
|
| 404 |
+
- Batch Normalization
|
| 405 |
+
- Bottleneck Residual Block
|
| 406 |
+
- Convolution
|
| 407 |
+
- Global Average Pooling
|
| 408 |
+
- Max Pooling
|
| 409 |
+
- ReLU
|
| 410 |
+
- Residual Block
|
| 411 |
+
- Residual Connection
|
| 412 |
+
- Softmax
|
| 413 |
+
Tasks:
|
| 414 |
+
- Image Classification
|
| 415 |
+
Training Techniques:
|
| 416 |
+
- SGD with Momentum
|
| 417 |
+
- Weight Decay
|
| 418 |
+
Training Data:
|
| 419 |
+
- ImageNet
|
| 420 |
+
ID: tv_resnet50
|
| 421 |
+
LR: 0.1
|
| 422 |
+
Epochs: 90
|
| 423 |
+
Crop Pct: '0.875'
|
| 424 |
+
LR Gamma: 0.1
|
| 425 |
+
Momentum: 0.9
|
| 426 |
+
Batch Size: 32
|
| 427 |
+
Image Size: '224'
|
| 428 |
+
LR Step Size: 30
|
| 429 |
+
Weight Decay: 0.0001
|
| 430 |
+
Interpolation: bilinear
|
| 431 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753
|
| 432 |
+
Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth
|
| 433 |
+
Results:
|
| 434 |
+
- Task: Image Classification
|
| 435 |
+
Dataset: ImageNet
|
| 436 |
+
Metrics:
|
| 437 |
+
Top 1 Accuracy: 76.16%
|
| 438 |
+
Top 5 Accuracy: 92.88%
|
| 439 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/resnext.md
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ResNeXt
|
| 2 |
+
|
| 3 |
+
A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('resnext101_32x8d', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# Samoyed 0.6425196528434753, Pomeranian 0.04062102362513542, keeshond 0.03186424449086189, white wolf 0.01739676296710968, Eskimo dog 0.011717947199940681
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{DBLP:journals/corr/XieGDTH16,
|
| 76 |
+
author = {Saining Xie and
|
| 77 |
+
Ross B. Girshick and
|
| 78 |
+
Piotr Doll{\'{a}}r and
|
| 79 |
+
Zhuowen Tu and
|
| 80 |
+
Kaiming He},
|
| 81 |
+
title = {Aggregated Residual Transformations for Deep Neural Networks},
|
| 82 |
+
journal = {CoRR},
|
| 83 |
+
volume = {abs/1611.05431},
|
| 84 |
+
year = {2016},
|
| 85 |
+
url = {http://arxiv.org/abs/1611.05431},
|
| 86 |
+
archivePrefix = {arXiv},
|
| 87 |
+
eprint = {1611.05431},
|
| 88 |
+
timestamp = {Mon, 13 Aug 2018 16:45:58 +0200},
|
| 89 |
+
biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib},
|
| 90 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 91 |
+
}
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
<!--
|
| 95 |
+
Type: model-index
|
| 96 |
+
Collections:
|
| 97 |
+
- Name: ResNeXt
|
| 98 |
+
Paper:
|
| 99 |
+
Title: Aggregated Residual Transformations for Deep Neural Networks
|
| 100 |
+
URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep
|
| 101 |
+
Models:
|
| 102 |
+
- Name: resnext101_32x8d
|
| 103 |
+
In Collection: ResNeXt
|
| 104 |
+
Metadata:
|
| 105 |
+
FLOPs: 21180417024
|
| 106 |
+
Parameters: 88790000
|
| 107 |
+
File Size: 356082095
|
| 108 |
+
Architecture:
|
| 109 |
+
- 1x1 Convolution
|
| 110 |
+
- Batch Normalization
|
| 111 |
+
- Convolution
|
| 112 |
+
- Global Average Pooling
|
| 113 |
+
- Grouped Convolution
|
| 114 |
+
- Max Pooling
|
| 115 |
+
- ReLU
|
| 116 |
+
- ResNeXt Block
|
| 117 |
+
- Residual Connection
|
| 118 |
+
- Softmax
|
| 119 |
+
Tasks:
|
| 120 |
+
- Image Classification
|
| 121 |
+
Training Data:
|
| 122 |
+
- ImageNet
|
| 123 |
+
ID: resnext101_32x8d
|
| 124 |
+
Crop Pct: '0.875'
|
| 125 |
+
Image Size: '224'
|
| 126 |
+
Interpolation: bilinear
|
| 127 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L877
|
| 128 |
+
Weights: https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
|
| 129 |
+
Results:
|
| 130 |
+
- Task: Image Classification
|
| 131 |
+
Dataset: ImageNet
|
| 132 |
+
Metrics:
|
| 133 |
+
Top 1 Accuracy: 79.3%
|
| 134 |
+
Top 5 Accuracy: 94.53%
|
| 135 |
+
- Name: resnext50_32x4d
|
| 136 |
+
In Collection: ResNeXt
|
| 137 |
+
Metadata:
|
| 138 |
+
FLOPs: 5472648192
|
| 139 |
+
Parameters: 25030000
|
| 140 |
+
File Size: 100435887
|
| 141 |
+
Architecture:
|
| 142 |
+
- 1x1 Convolution
|
| 143 |
+
- Batch Normalization
|
| 144 |
+
- Convolution
|
| 145 |
+
- Global Average Pooling
|
| 146 |
+
- Grouped Convolution
|
| 147 |
+
- Max Pooling
|
| 148 |
+
- ReLU
|
| 149 |
+
- ResNeXt Block
|
| 150 |
+
- Residual Connection
|
| 151 |
+
- Softmax
|
| 152 |
+
Tasks:
|
| 153 |
+
- Image Classification
|
| 154 |
+
Training Data:
|
| 155 |
+
- ImageNet
|
| 156 |
+
ID: resnext50_32x4d
|
| 157 |
+
Crop Pct: '0.875'
|
| 158 |
+
Image Size: '224'
|
| 159 |
+
Interpolation: bicubic
|
| 160 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L851
|
| 161 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth
|
| 162 |
+
Results:
|
| 163 |
+
- Task: Image Classification
|
| 164 |
+
Dataset: ImageNet
|
| 165 |
+
Metrics:
|
| 166 |
+
Top 1 Accuracy: 79.79%
|
| 167 |
+
Top 5 Accuracy: 94.61%
|
| 168 |
+
- Name: resnext50d_32x4d
|
| 169 |
+
In Collection: ResNeXt
|
| 170 |
+
Metadata:
|
| 171 |
+
FLOPs: 5781119488
|
| 172 |
+
Parameters: 25050000
|
| 173 |
+
File Size: 100515304
|
| 174 |
+
Architecture:
|
| 175 |
+
- 1x1 Convolution
|
| 176 |
+
- Batch Normalization
|
| 177 |
+
- Convolution
|
| 178 |
+
- Global Average Pooling
|
| 179 |
+
- Grouped Convolution
|
| 180 |
+
- Max Pooling
|
| 181 |
+
- ReLU
|
| 182 |
+
- ResNeXt Block
|
| 183 |
+
- Residual Connection
|
| 184 |
+
- Softmax
|
| 185 |
+
Tasks:
|
| 186 |
+
- Image Classification
|
| 187 |
+
Training Data:
|
| 188 |
+
- ImageNet
|
| 189 |
+
ID: resnext50d_32x4d
|
| 190 |
+
Crop Pct: '0.875'
|
| 191 |
+
Image Size: '224'
|
| 192 |
+
Interpolation: bicubic
|
| 193 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L869
|
| 194 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth
|
| 195 |
+
Results:
|
| 196 |
+
- Task: Image Classification
|
| 197 |
+
Dataset: ImageNet
|
| 198 |
+
Metrics:
|
| 199 |
+
Top 1 Accuracy: 79.67%
|
| 200 |
+
Top 5 Accuracy: 94.87%
|
| 201 |
+
- Name: tv_resnext50_32x4d
|
| 202 |
+
In Collection: ResNeXt
|
| 203 |
+
Metadata:
|
| 204 |
+
FLOPs: 5472648192
|
| 205 |
+
Parameters: 25030000
|
| 206 |
+
File Size: 100441675
|
| 207 |
+
Architecture:
|
| 208 |
+
- 1x1 Convolution
|
| 209 |
+
- Batch Normalization
|
| 210 |
+
- Convolution
|
| 211 |
+
- Global Average Pooling
|
| 212 |
+
- Grouped Convolution
|
| 213 |
+
- Max Pooling
|
| 214 |
+
- ReLU
|
| 215 |
+
- ResNeXt Block
|
| 216 |
+
- Residual Connection
|
| 217 |
+
- Softmax
|
| 218 |
+
Tasks:
|
| 219 |
+
- Image Classification
|
| 220 |
+
Training Techniques:
|
| 221 |
+
- SGD with Momentum
|
| 222 |
+
- Weight Decay
|
| 223 |
+
Training Data:
|
| 224 |
+
- ImageNet
|
| 225 |
+
ID: tv_resnext50_32x4d
|
| 226 |
+
LR: 0.1
|
| 227 |
+
Epochs: 90
|
| 228 |
+
Crop Pct: '0.875'
|
| 229 |
+
LR Gamma: 0.1
|
| 230 |
+
Momentum: 0.9
|
| 231 |
+
Batch Size: 32
|
| 232 |
+
Image Size: '224'
|
| 233 |
+
LR Step Size: 30
|
| 234 |
+
Weight Decay: 0.0001
|
| 235 |
+
Interpolation: bilinear
|
| 236 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L842
|
| 237 |
+
Weights: https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
|
| 238 |
+
Results:
|
| 239 |
+
- Task: Image Classification
|
| 240 |
+
Dataset: ImageNet
|
| 241 |
+
Metrics:
|
| 242 |
+
Top 1 Accuracy: 77.61%
|
| 243 |
+
Top 5 Accuracy: 93.68%
|
| 244 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/rexnet.md
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ReXNet
|
| 2 |
+
|
| 3 |
+
**Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6) with SiLU (Swish-1) activations.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('rexnet_100', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# Samoyed 0.6425196528434753, Pomeranian 0.04062102362513542, keeshond 0.03186424449086189, white wolf 0.01739676296710968, Eskimo dog 0.011717947199940681
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `rexnet_100`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{han2020rexnet,
|
| 76 |
+
title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network},
|
| 77 |
+
author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo},
|
| 78 |
+
year={2020},
|
| 79 |
+
eprint={2007.00992},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: RexNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: 'ReXNet: Diminishing Representational Bottleneck on Convolutional Neural
|
| 91 |
+
Network'
|
| 92 |
+
URL: https://paperswithcode.com/paper/rexnet-diminishing-representational
|
| 93 |
+
Models:
|
| 94 |
+
- Name: rexnet_100
|
| 95 |
+
In Collection: RexNet
|
| 96 |
+
Metadata:
|
| 97 |
+
FLOPs: 509989377
|
| 98 |
+
Parameters: 4800000
|
| 99 |
+
File Size: 19417552
|
| 100 |
+
Architecture:
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Convolution
|
| 103 |
+
- Dropout
|
| 104 |
+
- ReLU6
|
| 105 |
+
- Residual Connection
|
| 106 |
+
Tasks:
|
| 107 |
+
- Image Classification
|
| 108 |
+
Training Techniques:
|
| 109 |
+
- Label Smoothing
|
| 110 |
+
- Linear Warmup With Cosine Annealing
|
| 111 |
+
- Nesterov Accelerated Gradient
|
| 112 |
+
- Weight Decay
|
| 113 |
+
Training Data:
|
| 114 |
+
- ImageNet
|
| 115 |
+
Training Resources: 4x NVIDIA V100 GPUs
|
| 116 |
+
ID: rexnet_100
|
| 117 |
+
LR: 0.5
|
| 118 |
+
Epochs: 400
|
| 119 |
+
Dropout: 0.2
|
| 120 |
+
Crop Pct: '0.875'
|
| 121 |
+
Momentum: 0.9
|
| 122 |
+
Batch Size: 512
|
| 123 |
+
Image Size: '224'
|
| 124 |
+
Weight Decay: 1.0e-05
|
| 125 |
+
Interpolation: bicubic
|
| 126 |
+
Label Smoothing: 0.1
|
| 127 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L212
|
| 128 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth
|
| 129 |
+
Results:
|
| 130 |
+
- Task: Image Classification
|
| 131 |
+
Dataset: ImageNet
|
| 132 |
+
Metrics:
|
| 133 |
+
Top 1 Accuracy: 77.86%
|
| 134 |
+
Top 5 Accuracy: 93.88%
|
| 135 |
+
- Name: rexnet_130
|
| 136 |
+
In Collection: RexNet
|
| 137 |
+
Metadata:
|
| 138 |
+
FLOPs: 848364461
|
| 139 |
+
Parameters: 7560000
|
| 140 |
+
File Size: 30508197
|
| 141 |
+
Architecture:
|
| 142 |
+
- Batch Normalization
|
| 143 |
+
- Convolution
|
| 144 |
+
- Dropout
|
| 145 |
+
- ReLU6
|
| 146 |
+
- Residual Connection
|
| 147 |
+
Tasks:
|
| 148 |
+
- Image Classification
|
| 149 |
+
Training Techniques:
|
| 150 |
+
- Label Smoothing
|
| 151 |
+
- Linear Warmup With Cosine Annealing
|
| 152 |
+
- Nesterov Accelerated Gradient
|
| 153 |
+
- Weight Decay
|
| 154 |
+
Training Data:
|
| 155 |
+
- ImageNet
|
| 156 |
+
Training Resources: 4x NVIDIA V100 GPUs
|
| 157 |
+
ID: rexnet_130
|
| 158 |
+
LR: 0.5
|
| 159 |
+
Epochs: 400
|
| 160 |
+
Dropout: 0.2
|
| 161 |
+
Crop Pct: '0.875'
|
| 162 |
+
Momentum: 0.9
|
| 163 |
+
Batch Size: 512
|
| 164 |
+
Image Size: '224'
|
| 165 |
+
Weight Decay: 1.0e-05
|
| 166 |
+
Interpolation: bicubic
|
| 167 |
+
Label Smoothing: 0.1
|
| 168 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L218
|
| 169 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth
|
| 170 |
+
Results:
|
| 171 |
+
- Task: Image Classification
|
| 172 |
+
Dataset: ImageNet
|
| 173 |
+
Metrics:
|
| 174 |
+
Top 1 Accuracy: 79.49%
|
| 175 |
+
Top 5 Accuracy: 94.67%
|
| 176 |
+
- Name: rexnet_150
|
| 177 |
+
In Collection: RexNet
|
| 178 |
+
Metadata:
|
| 179 |
+
FLOPs: 1122374469
|
| 180 |
+
Parameters: 9730000
|
| 181 |
+
File Size: 39227315
|
| 182 |
+
Architecture:
|
| 183 |
+
- Batch Normalization
|
| 184 |
+
- Convolution
|
| 185 |
+
- Dropout
|
| 186 |
+
- ReLU6
|
| 187 |
+
- Residual Connection
|
| 188 |
+
Tasks:
|
| 189 |
+
- Image Classification
|
| 190 |
+
Training Techniques:
|
| 191 |
+
- Label Smoothing
|
| 192 |
+
- Linear Warmup With Cosine Annealing
|
| 193 |
+
- Nesterov Accelerated Gradient
|
| 194 |
+
- Weight Decay
|
| 195 |
+
Training Data:
|
| 196 |
+
- ImageNet
|
| 197 |
+
Training Resources: 4x NVIDIA V100 GPUs
|
| 198 |
+
ID: rexnet_150
|
| 199 |
+
LR: 0.5
|
| 200 |
+
Epochs: 400
|
| 201 |
+
Dropout: 0.2
|
| 202 |
+
Crop Pct: '0.875'
|
| 203 |
+
Momentum: 0.9
|
| 204 |
+
Batch Size: 512
|
| 205 |
+
Image Size: '224'
|
| 206 |
+
Weight Decay: 1.0e-05
|
| 207 |
+
Interpolation: bicubic
|
| 208 |
+
Label Smoothing: 0.1
|
| 209 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L224
|
| 210 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth
|
| 211 |
+
Results:
|
| 212 |
+
- Task: Image Classification
|
| 213 |
+
Dataset: ImageNet
|
| 214 |
+
Metrics:
|
| 215 |
+
Top 1 Accuracy: 80.31%
|
| 216 |
+
Top 5 Accuracy: 95.16%
|
| 217 |
+
- Name: rexnet_200
|
| 218 |
+
In Collection: RexNet
|
| 219 |
+
Metadata:
|
| 220 |
+
FLOPs: 1960224938
|
| 221 |
+
Parameters: 16370000
|
| 222 |
+
File Size: 65862221
|
| 223 |
+
Architecture:
|
| 224 |
+
- Batch Normalization
|
| 225 |
+
- Convolution
|
| 226 |
+
- Dropout
|
| 227 |
+
- ReLU6
|
| 228 |
+
- Residual Connection
|
| 229 |
+
Tasks:
|
| 230 |
+
- Image Classification
|
| 231 |
+
Training Techniques:
|
| 232 |
+
- Label Smoothing
|
| 233 |
+
- Linear Warmup With Cosine Annealing
|
| 234 |
+
- Nesterov Accelerated Gradient
|
| 235 |
+
- Weight Decay
|
| 236 |
+
Training Data:
|
| 237 |
+
- ImageNet
|
| 238 |
+
Training Resources: 4x NVIDIA V100 GPUs
|
| 239 |
+
ID: rexnet_200
|
| 240 |
+
LR: 0.5
|
| 241 |
+
Epochs: 400
|
| 242 |
+
Dropout: 0.2
|
| 243 |
+
Crop Pct: '0.875'
|
| 244 |
+
Momentum: 0.9
|
| 245 |
+
Batch Size: 512
|
| 246 |
+
Image Size: '224'
|
| 247 |
+
Weight Decay: 1.0e-05
|
| 248 |
+
Interpolation: bicubic
|
| 249 |
+
Label Smoothing: 0.1
|
| 250 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L230
|
| 251 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth
|
| 252 |
+
Results:
|
| 253 |
+
- Task: Image Classification
|
| 254 |
+
Dataset: ImageNet
|
| 255 |
+
Metrics:
|
| 256 |
+
Top 1 Accuracy: 81.63%
|
| 257 |
+
Top 5 Accuracy: 95.67%
|
| 258 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/se-resnet.md
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SE-ResNet
|
| 2 |
+
|
| 3 |
+
**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('seresnet152d', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# Samoyed 0.6425196528434753, Pomeranian 0.04062102362513542, keeshond 0.03186424449086189, white wolf 0.01739676296710968, Eskimo dog 0.011717947199940681
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `seresnet152d`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('seresnet152d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{hu2019squeezeandexcitation,
|
| 76 |
+
title={Squeeze-and-Excitation Networks},
|
| 77 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1709.01507},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: SE ResNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Squeeze-and-Excitation Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: seresnet152d
|
| 94 |
+
In Collection: SE ResNet
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 20161904304
|
| 97 |
+
Parameters: 66840000
|
| 98 |
+
File Size: 268144497
|
| 99 |
+
Architecture:
|
| 100 |
+
- 1x1 Convolution
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Bottleneck Residual Block
|
| 103 |
+
- Convolution
|
| 104 |
+
- Global Average Pooling
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- ReLU
|
| 107 |
+
- Residual Block
|
| 108 |
+
- Residual Connection
|
| 109 |
+
- Softmax
|
| 110 |
+
- Squeeze-and-Excitation Block
|
| 111 |
+
Tasks:
|
| 112 |
+
- Image Classification
|
| 113 |
+
Training Techniques:
|
| 114 |
+
- Label Smoothing
|
| 115 |
+
- SGD with Momentum
|
| 116 |
+
- Weight Decay
|
| 117 |
+
Training Data:
|
| 118 |
+
- ImageNet
|
| 119 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 120 |
+
ID: seresnet152d
|
| 121 |
+
LR: 0.6
|
| 122 |
+
Epochs: 100
|
| 123 |
+
Layers: 152
|
| 124 |
+
Dropout: 0.2
|
| 125 |
+
Crop Pct: '0.94'
|
| 126 |
+
Momentum: 0.9
|
| 127 |
+
Batch Size: 1024
|
| 128 |
+
Image Size: '256'
|
| 129 |
+
Interpolation: bicubic
|
| 130 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1206
|
| 131 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth
|
| 132 |
+
Results:
|
| 133 |
+
- Task: Image Classification
|
| 134 |
+
Dataset: ImageNet
|
| 135 |
+
Metrics:
|
| 136 |
+
Top 1 Accuracy: 83.74%
|
| 137 |
+
Top 5 Accuracy: 96.77%
|
| 138 |
+
- Name: seresnet50
|
| 139 |
+
In Collection: SE ResNet
|
| 140 |
+
Metadata:
|
| 141 |
+
FLOPs: 5285062320
|
| 142 |
+
Parameters: 28090000
|
| 143 |
+
File Size: 112621903
|
| 144 |
+
Architecture:
|
| 145 |
+
- 1x1 Convolution
|
| 146 |
+
- Batch Normalization
|
| 147 |
+
- Bottleneck Residual Block
|
| 148 |
+
- Convolution
|
| 149 |
+
- Global Average Pooling
|
| 150 |
+
- Max Pooling
|
| 151 |
+
- ReLU
|
| 152 |
+
- Residual Block
|
| 153 |
+
- Residual Connection
|
| 154 |
+
- Softmax
|
| 155 |
+
- Squeeze-and-Excitation Block
|
| 156 |
+
Tasks:
|
| 157 |
+
- Image Classification
|
| 158 |
+
Training Techniques:
|
| 159 |
+
- Label Smoothing
|
| 160 |
+
- SGD with Momentum
|
| 161 |
+
- Weight Decay
|
| 162 |
+
Training Data:
|
| 163 |
+
- ImageNet
|
| 164 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 165 |
+
ID: seresnet50
|
| 166 |
+
LR: 0.6
|
| 167 |
+
Epochs: 100
|
| 168 |
+
Layers: 50
|
| 169 |
+
Dropout: 0.2
|
| 170 |
+
Crop Pct: '0.875'
|
| 171 |
+
Momentum: 0.9
|
| 172 |
+
Batch Size: 1024
|
| 173 |
+
Image Size: '224'
|
| 174 |
+
Interpolation: bicubic
|
| 175 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1180
|
| 176 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth
|
| 177 |
+
Results:
|
| 178 |
+
- Task: Image Classification
|
| 179 |
+
Dataset: ImageNet
|
| 180 |
+
Metrics:
|
| 181 |
+
Top 1 Accuracy: 80.26%
|
| 182 |
+
Top 5 Accuracy: 95.07%
|
| 183 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/selecsls.md
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SelecSLS
|
| 2 |
+
|
| 3 |
+
**SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('selecsls42b', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@article{Mehta_2020,
|
| 76 |
+
title={XNect},
|
| 77 |
+
volume={39},
|
| 78 |
+
ISSN={1557-7368},
|
| 79 |
+
url={http://dx.doi.org/10.1145/3386569.3392410},
|
| 80 |
+
DOI={10.1145/3386569.3392410},
|
| 81 |
+
number={4},
|
| 82 |
+
journal={ACM Transactions on Graphics},
|
| 83 |
+
publisher={Association for Computing Machinery (ACM)},
|
| 84 |
+
author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
|
| 85 |
+
year={2020},
|
| 86 |
+
month={Jul}
|
| 87 |
+
}
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
<!--
|
| 91 |
+
Type: model-index
|
| 92 |
+
Collections:
|
| 93 |
+
- Name: SelecSLS
|
| 94 |
+
Paper:
|
| 95 |
+
Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera'
|
| 96 |
+
URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose
|
| 97 |
+
Models:
|
| 98 |
+
- Name: selecsls42b
|
| 99 |
+
In Collection: SelecSLS
|
| 100 |
+
Metadata:
|
| 101 |
+
FLOPs: 3824022528
|
| 102 |
+
Parameters: 32460000
|
| 103 |
+
File Size: 129948954
|
| 104 |
+
Architecture:
|
| 105 |
+
- Batch Normalization
|
| 106 |
+
- Convolution
|
| 107 |
+
- Dense Connections
|
| 108 |
+
- Dropout
|
| 109 |
+
- Global Average Pooling
|
| 110 |
+
- ReLU
|
| 111 |
+
- SelecSLS Block
|
| 112 |
+
Tasks:
|
| 113 |
+
- Image Classification
|
| 114 |
+
Training Techniques:
|
| 115 |
+
- Cosine Annealing
|
| 116 |
+
- Random Erasing
|
| 117 |
+
Training Data:
|
| 118 |
+
- ImageNet
|
| 119 |
+
ID: selecsls42b
|
| 120 |
+
Crop Pct: '0.875'
|
| 121 |
+
Image Size: '224'
|
| 122 |
+
Interpolation: bicubic
|
| 123 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335
|
| 124 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth
|
| 125 |
+
Results:
|
| 126 |
+
- Task: Image Classification
|
| 127 |
+
Dataset: ImageNet
|
| 128 |
+
Metrics:
|
| 129 |
+
Top 1 Accuracy: 77.18%
|
| 130 |
+
Top 5 Accuracy: 93.39%
|
| 131 |
+
- Name: selecsls60
|
| 132 |
+
In Collection: SelecSLS
|
| 133 |
+
Metadata:
|
| 134 |
+
FLOPs: 4610472600
|
| 135 |
+
Parameters: 30670000
|
| 136 |
+
File Size: 122839714
|
| 137 |
+
Architecture:
|
| 138 |
+
- Batch Normalization
|
| 139 |
+
- Convolution
|
| 140 |
+
- Dense Connections
|
| 141 |
+
- Dropout
|
| 142 |
+
- Global Average Pooling
|
| 143 |
+
- ReLU
|
| 144 |
+
- SelecSLS Block
|
| 145 |
+
Tasks:
|
| 146 |
+
- Image Classification
|
| 147 |
+
Training Techniques:
|
| 148 |
+
- Cosine Annealing
|
| 149 |
+
- Random Erasing
|
| 150 |
+
Training Data:
|
| 151 |
+
- ImageNet
|
| 152 |
+
ID: selecsls60
|
| 153 |
+
Crop Pct: '0.875'
|
| 154 |
+
Image Size: '224'
|
| 155 |
+
Interpolation: bicubic
|
| 156 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342
|
| 157 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth
|
| 158 |
+
Results:
|
| 159 |
+
- Task: Image Classification
|
| 160 |
+
Dataset: ImageNet
|
| 161 |
+
Metrics:
|
| 162 |
+
Top 1 Accuracy: 77.99%
|
| 163 |
+
Top 5 Accuracy: 93.83%
|
| 164 |
+
- Name: selecsls60b
|
| 165 |
+
In Collection: SelecSLS
|
| 166 |
+
Metadata:
|
| 167 |
+
FLOPs: 4657653144
|
| 168 |
+
Parameters: 32770000
|
| 169 |
+
File Size: 131252898
|
| 170 |
+
Architecture:
|
| 171 |
+
- Batch Normalization
|
| 172 |
+
- Convolution
|
| 173 |
+
- Dense Connections
|
| 174 |
+
- Dropout
|
| 175 |
+
- Global Average Pooling
|
| 176 |
+
- ReLU
|
| 177 |
+
- SelecSLS Block
|
| 178 |
+
Tasks:
|
| 179 |
+
- Image Classification
|
| 180 |
+
Training Techniques:
|
| 181 |
+
- Cosine Annealing
|
| 182 |
+
- Random Erasing
|
| 183 |
+
Training Data:
|
| 184 |
+
- ImageNet
|
| 185 |
+
ID: selecsls60b
|
| 186 |
+
Crop Pct: '0.875'
|
| 187 |
+
Image Size: '224'
|
| 188 |
+
Interpolation: bicubic
|
| 189 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349
|
| 190 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth
|
| 191 |
+
Results:
|
| 192 |
+
- Task: Image Classification
|
| 193 |
+
Dataset: ImageNet
|
| 194 |
+
Metrics:
|
| 195 |
+
Top 1 Accuracy: 78.41%
|
| 196 |
+
Top 5 Accuracy: 94.18%
|
| 197 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/seresnext.md
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SE-ResNeXt
|
| 2 |
+
|
| 3 |
+
**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resneXt) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('seresnext26d_32x4d', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `seresnext26d_32x4d`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('seresnext26d_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{hu2019squeezeandexcitation,
|
| 76 |
+
title={Squeeze-and-Excitation Networks},
|
| 77 |
+
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1709.01507},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: SEResNeXt
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Squeeze-and-Excitation Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: seresnext26d_32x4d
|
| 94 |
+
In Collection: SEResNeXt
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 3507053024
|
| 97 |
+
Parameters: 16810000
|
| 98 |
+
File Size: 67425193
|
| 99 |
+
Architecture:
|
| 100 |
+
- 1x1 Convolution
|
| 101 |
+
- Batch Normalization
|
| 102 |
+
- Convolution
|
| 103 |
+
- Global Average Pooling
|
| 104 |
+
- Grouped Convolution
|
| 105 |
+
- Max Pooling
|
| 106 |
+
- ReLU
|
| 107 |
+
- ResNeXt Block
|
| 108 |
+
- Residual Connection
|
| 109 |
+
- Softmax
|
| 110 |
+
- Squeeze-and-Excitation Block
|
| 111 |
+
Tasks:
|
| 112 |
+
- Image Classification
|
| 113 |
+
Training Techniques:
|
| 114 |
+
- Label Smoothing
|
| 115 |
+
- SGD with Momentum
|
| 116 |
+
- Weight Decay
|
| 117 |
+
Training Data:
|
| 118 |
+
- ImageNet
|
| 119 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 120 |
+
ID: seresnext26d_32x4d
|
| 121 |
+
LR: 0.6
|
| 122 |
+
Epochs: 100
|
| 123 |
+
Layers: 26
|
| 124 |
+
Dropout: 0.2
|
| 125 |
+
Crop Pct: '0.875'
|
| 126 |
+
Momentum: 0.9
|
| 127 |
+
Batch Size: 1024
|
| 128 |
+
Image Size: '224'
|
| 129 |
+
Interpolation: bicubic
|
| 130 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1234
|
| 131 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth
|
| 132 |
+
Results:
|
| 133 |
+
- Task: Image Classification
|
| 134 |
+
Dataset: ImageNet
|
| 135 |
+
Metrics:
|
| 136 |
+
Top 1 Accuracy: 77.59%
|
| 137 |
+
Top 5 Accuracy: 93.61%
|
| 138 |
+
- Name: seresnext26t_32x4d
|
| 139 |
+
In Collection: SEResNeXt
|
| 140 |
+
Metadata:
|
| 141 |
+
FLOPs: 3466436448
|
| 142 |
+
Parameters: 16820000
|
| 143 |
+
File Size: 67414838
|
| 144 |
+
Architecture:
|
| 145 |
+
- 1x1 Convolution
|
| 146 |
+
- Batch Normalization
|
| 147 |
+
- Convolution
|
| 148 |
+
- Global Average Pooling
|
| 149 |
+
- Grouped Convolution
|
| 150 |
+
- Max Pooling
|
| 151 |
+
- ReLU
|
| 152 |
+
- ResNeXt Block
|
| 153 |
+
- Residual Connection
|
| 154 |
+
- Softmax
|
| 155 |
+
- Squeeze-and-Excitation Block
|
| 156 |
+
Tasks:
|
| 157 |
+
- Image Classification
|
| 158 |
+
Training Techniques:
|
| 159 |
+
- Label Smoothing
|
| 160 |
+
- SGD with Momentum
|
| 161 |
+
- Weight Decay
|
| 162 |
+
Training Data:
|
| 163 |
+
- ImageNet
|
| 164 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 165 |
+
ID: seresnext26t_32x4d
|
| 166 |
+
LR: 0.6
|
| 167 |
+
Epochs: 100
|
| 168 |
+
Layers: 26
|
| 169 |
+
Dropout: 0.2
|
| 170 |
+
Crop Pct: '0.875'
|
| 171 |
+
Momentum: 0.9
|
| 172 |
+
Batch Size: 1024
|
| 173 |
+
Image Size: '224'
|
| 174 |
+
Interpolation: bicubic
|
| 175 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1246
|
| 176 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth
|
| 177 |
+
Results:
|
| 178 |
+
- Task: Image Classification
|
| 179 |
+
Dataset: ImageNet
|
| 180 |
+
Metrics:
|
| 181 |
+
Top 1 Accuracy: 77.99%
|
| 182 |
+
Top 5 Accuracy: 93.73%
|
| 183 |
+
- Name: seresnext50_32x4d
|
| 184 |
+
In Collection: SEResNeXt
|
| 185 |
+
Metadata:
|
| 186 |
+
FLOPs: 5475179184
|
| 187 |
+
Parameters: 27560000
|
| 188 |
+
File Size: 110569859
|
| 189 |
+
Architecture:
|
| 190 |
+
- 1x1 Convolution
|
| 191 |
+
- Batch Normalization
|
| 192 |
+
- Convolution
|
| 193 |
+
- Global Average Pooling
|
| 194 |
+
- Grouped Convolution
|
| 195 |
+
- Max Pooling
|
| 196 |
+
- ReLU
|
| 197 |
+
- ResNeXt Block
|
| 198 |
+
- Residual Connection
|
| 199 |
+
- Softmax
|
| 200 |
+
- Squeeze-and-Excitation Block
|
| 201 |
+
Tasks:
|
| 202 |
+
- Image Classification
|
| 203 |
+
Training Techniques:
|
| 204 |
+
- Label Smoothing
|
| 205 |
+
- SGD with Momentum
|
| 206 |
+
- Weight Decay
|
| 207 |
+
Training Data:
|
| 208 |
+
- ImageNet
|
| 209 |
+
Training Resources: 8x NVIDIA Titan X GPUs
|
| 210 |
+
ID: seresnext50_32x4d
|
| 211 |
+
LR: 0.6
|
| 212 |
+
Epochs: 100
|
| 213 |
+
Layers: 50
|
| 214 |
+
Dropout: 0.2
|
| 215 |
+
Crop Pct: '0.875'
|
| 216 |
+
Momentum: 0.9
|
| 217 |
+
Batch Size: 1024
|
| 218 |
+
Image Size: '224'
|
| 219 |
+
Interpolation: bicubic
|
| 220 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1267
|
| 221 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth
|
| 222 |
+
Results:
|
| 223 |
+
- Task: Image Classification
|
| 224 |
+
Dataset: ImageNet
|
| 225 |
+
Metrics:
|
| 226 |
+
Top 1 Accuracy: 81.27%
|
| 227 |
+
Top 5 Accuracy: 95.62%
|
| 228 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/skresnet.md
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SK-ResNet
|
| 2 |
+
|
| 3 |
+
**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('skresnet18', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{li2019selective,
|
| 76 |
+
title={Selective Kernel Networks},
|
| 77 |
+
author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1903.06586},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: SKResNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Selective Kernel Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/selective-kernel-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: skresnet18
|
| 94 |
+
In Collection: SKResNet
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 2333467136
|
| 97 |
+
Parameters: 11960000
|
| 98 |
+
File Size: 47923238
|
| 99 |
+
Architecture:
|
| 100 |
+
- Convolution
|
| 101 |
+
- Dense Connections
|
| 102 |
+
- Global Average Pooling
|
| 103 |
+
- Max Pooling
|
| 104 |
+
- Residual Connection
|
| 105 |
+
- Selective Kernel
|
| 106 |
+
- Softmax
|
| 107 |
+
Tasks:
|
| 108 |
+
- Image Classification
|
| 109 |
+
Training Techniques:
|
| 110 |
+
- SGD with Momentum
|
| 111 |
+
- Weight Decay
|
| 112 |
+
Training Data:
|
| 113 |
+
- ImageNet
|
| 114 |
+
Training Resources: 8x GPUs
|
| 115 |
+
ID: skresnet18
|
| 116 |
+
LR: 0.1
|
| 117 |
+
Epochs: 100
|
| 118 |
+
Layers: 18
|
| 119 |
+
Crop Pct: '0.875'
|
| 120 |
+
Momentum: 0.9
|
| 121 |
+
Batch Size: 256
|
| 122 |
+
Image Size: '224'
|
| 123 |
+
Weight Decay: 4.0e-05
|
| 124 |
+
Interpolation: bicubic
|
| 125 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L148
|
| 126 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth
|
| 127 |
+
Results:
|
| 128 |
+
- Task: Image Classification
|
| 129 |
+
Dataset: ImageNet
|
| 130 |
+
Metrics:
|
| 131 |
+
Top 1 Accuracy: 73.03%
|
| 132 |
+
Top 5 Accuracy: 91.17%
|
| 133 |
+
- Name: skresnet34
|
| 134 |
+
In Collection: SKResNet
|
| 135 |
+
Metadata:
|
| 136 |
+
FLOPs: 4711849952
|
| 137 |
+
Parameters: 22280000
|
| 138 |
+
File Size: 89299314
|
| 139 |
+
Architecture:
|
| 140 |
+
- Convolution
|
| 141 |
+
- Dense Connections
|
| 142 |
+
- Global Average Pooling
|
| 143 |
+
- Max Pooling
|
| 144 |
+
- Residual Connection
|
| 145 |
+
- Selective Kernel
|
| 146 |
+
- Softmax
|
| 147 |
+
Tasks:
|
| 148 |
+
- Image Classification
|
| 149 |
+
Training Techniques:
|
| 150 |
+
- SGD with Momentum
|
| 151 |
+
- Weight Decay
|
| 152 |
+
Training Data:
|
| 153 |
+
- ImageNet
|
| 154 |
+
Training Resources: 8x GPUs
|
| 155 |
+
ID: skresnet34
|
| 156 |
+
LR: 0.1
|
| 157 |
+
Epochs: 100
|
| 158 |
+
Layers: 34
|
| 159 |
+
Crop Pct: '0.875'
|
| 160 |
+
Momentum: 0.9
|
| 161 |
+
Batch Size: 256
|
| 162 |
+
Image Size: '224'
|
| 163 |
+
Weight Decay: 4.0e-05
|
| 164 |
+
Interpolation: bicubic
|
| 165 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L165
|
| 166 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth
|
| 167 |
+
Results:
|
| 168 |
+
- Task: Image Classification
|
| 169 |
+
Dataset: ImageNet
|
| 170 |
+
Metrics:
|
| 171 |
+
Top 1 Accuracy: 76.93%
|
| 172 |
+
Top 5 Accuracy: 93.32%
|
| 173 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/skresnext.md
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SK-ResNeXt
|
| 2 |
+
|
| 3 |
+
**SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('skresnext50_32x4d', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{li2019selective,
|
| 76 |
+
title={Selective Kernel Networks},
|
| 77 |
+
author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1903.06586},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.CV}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: SKResNeXt
|
| 89 |
+
Paper:
|
| 90 |
+
Title: Selective Kernel Networks
|
| 91 |
+
URL: https://paperswithcode.com/paper/selective-kernel-networks
|
| 92 |
+
Models:
|
| 93 |
+
- Name: skresnext50_32x4d
|
| 94 |
+
In Collection: SKResNeXt
|
| 95 |
+
Metadata:
|
| 96 |
+
FLOPs: 5739845824
|
| 97 |
+
Parameters: 27480000
|
| 98 |
+
File Size: 110340975
|
| 99 |
+
Architecture:
|
| 100 |
+
- Convolution
|
| 101 |
+
- Dense Connections
|
| 102 |
+
- Global Average Pooling
|
| 103 |
+
- Grouped Convolution
|
| 104 |
+
- Max Pooling
|
| 105 |
+
- Residual Connection
|
| 106 |
+
- Selective Kernel
|
| 107 |
+
- Softmax
|
| 108 |
+
Tasks:
|
| 109 |
+
- Image Classification
|
| 110 |
+
Training Data:
|
| 111 |
+
- ImageNet
|
| 112 |
+
Training Resources: 8x GPUs
|
| 113 |
+
ID: skresnext50_32x4d
|
| 114 |
+
LR: 0.1
|
| 115 |
+
Epochs: 100
|
| 116 |
+
Layers: 50
|
| 117 |
+
Crop Pct: '0.875'
|
| 118 |
+
Momentum: 0.9
|
| 119 |
+
Batch Size: 256
|
| 120 |
+
Image Size: '224'
|
| 121 |
+
Weight Decay: 0.0001
|
| 122 |
+
Interpolation: bicubic
|
| 123 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210
|
| 124 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth
|
| 125 |
+
Results:
|
| 126 |
+
- Task: Image Classification
|
| 127 |
+
Dataset: ImageNet
|
| 128 |
+
Metrics:
|
| 129 |
+
Top 1 Accuracy: 80.15%
|
| 130 |
+
Top 5 Accuracy: 94.64%
|
| 131 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/spnasnet.md
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPNASNet
|
| 2 |
+
|
| 3 |
+
**Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours.
|
| 4 |
+
|
| 5 |
+
## How do I use this model on an image?
|
| 6 |
+
To load a pretrained model:
|
| 7 |
+
|
| 8 |
+
```python
|
| 9 |
+
import timm
|
| 10 |
+
model = timm.create_model('spnasnet_100', pretrained=True)
|
| 11 |
+
model.eval()
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
To load and preprocess the image:
|
| 15 |
+
```python
|
| 16 |
+
import urllib
|
| 17 |
+
from PIL import Image
|
| 18 |
+
from timm.data import resolve_data_config
|
| 19 |
+
from timm.data.transforms_factory import create_transform
|
| 20 |
+
|
| 21 |
+
config = resolve_data_config({}, model=model)
|
| 22 |
+
transform = create_transform(**config)
|
| 23 |
+
|
| 24 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 25 |
+
urllib.request.urlretrieve(url, filename)
|
| 26 |
+
img = Image.open(filename).convert('RGB')
|
| 27 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
To get the model predictions:
|
| 31 |
+
```python
|
| 32 |
+
import torch
|
| 33 |
+
with torch.no_grad():
|
| 34 |
+
out = model(tensor)
|
| 35 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 36 |
+
print(probabilities.shape)
|
| 37 |
+
# prints: torch.Size([1000])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
To get the top-5 predictions class names:
|
| 41 |
+
```python
|
| 42 |
+
# Get imagenet class mappings
|
| 43 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 44 |
+
urllib.request.urlretrieve(url, filename)
|
| 45 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 46 |
+
categories = [s.strip() for s in f.readlines()]
|
| 47 |
+
|
| 48 |
+
# Print top categories per image
|
| 49 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 50 |
+
for i in range(top5_prob.size(0)):
|
| 51 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 52 |
+
# prints class names and probabilities like:
|
| 53 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page.
|
| 57 |
+
|
| 58 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 59 |
+
|
| 60 |
+
## How do I finetune this model?
|
| 61 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 62 |
+
```python
|
| 63 |
+
model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 64 |
+
```
|
| 65 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 66 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 67 |
+
|
| 68 |
+
## How do I train this model?
|
| 69 |
+
|
| 70 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 71 |
+
|
| 72 |
+
## Citation
|
| 73 |
+
|
| 74 |
+
```BibTeX
|
| 75 |
+
@misc{stamoulis2019singlepath,
|
| 76 |
+
title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours},
|
| 77 |
+
author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu},
|
| 78 |
+
year={2019},
|
| 79 |
+
eprint={1904.02877},
|
| 80 |
+
archivePrefix={arXiv},
|
| 81 |
+
primaryClass={cs.LG}
|
| 82 |
+
}
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
<!--
|
| 86 |
+
Type: model-index
|
| 87 |
+
Collections:
|
| 88 |
+
- Name: SPNASNet
|
| 89 |
+
Paper:
|
| 90 |
+
Title: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4
|
| 91 |
+
Hours'
|
| 92 |
+
URL: https://paperswithcode.com/paper/single-path-nas-designing-hardware-efficient
|
| 93 |
+
Models:
|
| 94 |
+
- Name: spnasnet_100
|
| 95 |
+
In Collection: SPNASNet
|
| 96 |
+
Metadata:
|
| 97 |
+
FLOPs: 442385600
|
| 98 |
+
Parameters: 4420000
|
| 99 |
+
File Size: 17902337
|
| 100 |
+
Architecture:
|
| 101 |
+
- Average Pooling
|
| 102 |
+
- Batch Normalization
|
| 103 |
+
- Convolution
|
| 104 |
+
- Depthwise Separable Convolution
|
| 105 |
+
- Dropout
|
| 106 |
+
- ReLU
|
| 107 |
+
Tasks:
|
| 108 |
+
- Image Classification
|
| 109 |
+
Training Data:
|
| 110 |
+
- ImageNet
|
| 111 |
+
ID: spnasnet_100
|
| 112 |
+
Crop Pct: '0.875'
|
| 113 |
+
Image Size: '224'
|
| 114 |
+
Interpolation: bilinear
|
| 115 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L995
|
| 116 |
+
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth
|
| 117 |
+
Results:
|
| 118 |
+
- Task: Image Classification
|
| 119 |
+
Dataset: ImageNet
|
| 120 |
+
Metrics:
|
| 121 |
+
Top 1 Accuracy: 74.08%
|
| 122 |
+
Top 5 Accuracy: 91.82%
|
| 123 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/ssl-resnet.md
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SSL ResNet
|
| 2 |
+
|
| 3 |
+
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form networks: e.g. a ResNet-50 has fifty layers using these blocks.
|
| 4 |
+
|
| 5 |
+
The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification.
|
| 6 |
+
|
| 7 |
+
Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
|
| 8 |
+
|
| 9 |
+
## How do I use this model on an image?
|
| 10 |
+
To load a pretrained model:
|
| 11 |
+
|
| 12 |
+
```python
|
| 13 |
+
import timm
|
| 14 |
+
model = timm.create_model('ssl_resnet18', pretrained=True)
|
| 15 |
+
model.eval()
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
To load and preprocess the image:
|
| 19 |
+
```python
|
| 20 |
+
import urllib
|
| 21 |
+
from PIL import Image
|
| 22 |
+
from timm.data import resolve_data_config
|
| 23 |
+
from timm.data.transforms_factory import create_transform
|
| 24 |
+
|
| 25 |
+
config = resolve_data_config({}, model=model)
|
| 26 |
+
transform = create_transform(**config)
|
| 27 |
+
|
| 28 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 29 |
+
urllib.request.urlretrieve(url, filename)
|
| 30 |
+
img = Image.open(filename).convert('RGB')
|
| 31 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
To get the model predictions:
|
| 35 |
+
```python
|
| 36 |
+
import torch
|
| 37 |
+
with torch.no_grad():
|
| 38 |
+
out = model(tensor)
|
| 39 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 40 |
+
print(probabilities.shape)
|
| 41 |
+
# prints: torch.Size([1000])
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
To get the top-5 predictions class names:
|
| 45 |
+
```python
|
| 46 |
+
# Get imagenet class mappings
|
| 47 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 48 |
+
urllib.request.urlretrieve(url, filename)
|
| 49 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 50 |
+
categories = [s.strip() for s in f.readlines()]
|
| 51 |
+
|
| 52 |
+
# Print top categories per image
|
| 53 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 54 |
+
for i in range(top5_prob.size(0)):
|
| 55 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 56 |
+
# prints class names and probabilities like:
|
| 57 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page.
|
| 61 |
+
|
| 62 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 63 |
+
|
| 64 |
+
## How do I finetune this model?
|
| 65 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 66 |
+
```python
|
| 67 |
+
model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 68 |
+
```
|
| 69 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 70 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 71 |
+
|
| 72 |
+
## How do I train this model?
|
| 73 |
+
|
| 74 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 75 |
+
|
| 76 |
+
## Citation
|
| 77 |
+
|
| 78 |
+
```BibTeX
|
| 79 |
+
@article{DBLP:journals/corr/abs-1905-00546,
|
| 80 |
+
author = {I. Zeki Yalniz and
|
| 81 |
+
Herv{\'{e}} J{\'{e}}gou and
|
| 82 |
+
Kan Chen and
|
| 83 |
+
Manohar Paluri and
|
| 84 |
+
Dhruv Mahajan},
|
| 85 |
+
title = {Billion-scale semi-supervised learning for image classification},
|
| 86 |
+
journal = {CoRR},
|
| 87 |
+
volume = {abs/1905.00546},
|
| 88 |
+
year = {2019},
|
| 89 |
+
url = {http://arxiv.org/abs/1905.00546},
|
| 90 |
+
archivePrefix = {arXiv},
|
| 91 |
+
eprint = {1905.00546},
|
| 92 |
+
timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
|
| 93 |
+
biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
|
| 94 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 95 |
+
}
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
<!--
|
| 99 |
+
Type: model-index
|
| 100 |
+
Collections:
|
| 101 |
+
- Name: SSL ResNet
|
| 102 |
+
Paper:
|
| 103 |
+
Title: Billion-scale semi-supervised learning for image classification
|
| 104 |
+
URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
|
| 105 |
+
Models:
|
| 106 |
+
- Name: ssl_resnet18
|
| 107 |
+
In Collection: SSL ResNet
|
| 108 |
+
Metadata:
|
| 109 |
+
FLOPs: 2337073152
|
| 110 |
+
Parameters: 11690000
|
| 111 |
+
File Size: 46811375
|
| 112 |
+
Architecture:
|
| 113 |
+
- 1x1 Convolution
|
| 114 |
+
- Batch Normalization
|
| 115 |
+
- Bottleneck Residual Block
|
| 116 |
+
- Convolution
|
| 117 |
+
- Global Average Pooling
|
| 118 |
+
- Max Pooling
|
| 119 |
+
- ReLU
|
| 120 |
+
- Residual Block
|
| 121 |
+
- Residual Connection
|
| 122 |
+
- Softmax
|
| 123 |
+
Tasks:
|
| 124 |
+
- Image Classification
|
| 125 |
+
Training Techniques:
|
| 126 |
+
- SGD with Momentum
|
| 127 |
+
- Weight Decay
|
| 128 |
+
Training Data:
|
| 129 |
+
- ImageNet
|
| 130 |
+
- YFCC-100M
|
| 131 |
+
Training Resources: 64x GPUs
|
| 132 |
+
ID: ssl_resnet18
|
| 133 |
+
LR: 0.0015
|
| 134 |
+
Epochs: 30
|
| 135 |
+
Layers: 18
|
| 136 |
+
Crop Pct: '0.875'
|
| 137 |
+
Batch Size: 1536
|
| 138 |
+
Image Size: '224'
|
| 139 |
+
Weight Decay: 0.0001
|
| 140 |
+
Interpolation: bilinear
|
| 141 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894
|
| 142 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth
|
| 143 |
+
Results:
|
| 144 |
+
- Task: Image Classification
|
| 145 |
+
Dataset: ImageNet
|
| 146 |
+
Metrics:
|
| 147 |
+
Top 1 Accuracy: 72.62%
|
| 148 |
+
Top 5 Accuracy: 91.42%
|
| 149 |
+
- Name: ssl_resnet50
|
| 150 |
+
In Collection: SSL ResNet
|
| 151 |
+
Metadata:
|
| 152 |
+
FLOPs: 5282531328
|
| 153 |
+
Parameters: 25560000
|
| 154 |
+
File Size: 102480594
|
| 155 |
+
Architecture:
|
| 156 |
+
- 1x1 Convolution
|
| 157 |
+
- Batch Normalization
|
| 158 |
+
- Bottleneck Residual Block
|
| 159 |
+
- Convolution
|
| 160 |
+
- Global Average Pooling
|
| 161 |
+
- Max Pooling
|
| 162 |
+
- ReLU
|
| 163 |
+
- Residual Block
|
| 164 |
+
- Residual Connection
|
| 165 |
+
- Softmax
|
| 166 |
+
Tasks:
|
| 167 |
+
- Image Classification
|
| 168 |
+
Training Techniques:
|
| 169 |
+
- SGD with Momentum
|
| 170 |
+
- Weight Decay
|
| 171 |
+
Training Data:
|
| 172 |
+
- ImageNet
|
| 173 |
+
- YFCC-100M
|
| 174 |
+
Training Resources: 64x GPUs
|
| 175 |
+
ID: ssl_resnet50
|
| 176 |
+
LR: 0.0015
|
| 177 |
+
Epochs: 30
|
| 178 |
+
Layers: 50
|
| 179 |
+
Crop Pct: '0.875'
|
| 180 |
+
Batch Size: 1536
|
| 181 |
+
Image Size: '224'
|
| 182 |
+
Weight Decay: 0.0001
|
| 183 |
+
Interpolation: bilinear
|
| 184 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904
|
| 185 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth
|
| 186 |
+
Results:
|
| 187 |
+
- Task: Image Classification
|
| 188 |
+
Dataset: ImageNet
|
| 189 |
+
Metrics:
|
| 190 |
+
Top 1 Accuracy: 79.24%
|
| 191 |
+
Top 5 Accuracy: 94.83%
|
| 192 |
+
-->
|
testbed/huggingface__pytorch-image-models/docs/models/ssl-resnext.md
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SSL ResNeXT
|
| 2 |
+
|
| 3 |
+
A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width.
|
| 4 |
+
|
| 5 |
+
The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification.
|
| 6 |
+
|
| 7 |
+
Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
|
| 8 |
+
|
| 9 |
+
## How do I use this model on an image?
|
| 10 |
+
To load a pretrained model:
|
| 11 |
+
|
| 12 |
+
```python
|
| 13 |
+
import timm
|
| 14 |
+
model = timm.create_model('ssl_resnext101_32x16d', pretrained=True)
|
| 15 |
+
model.eval()
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
To load and preprocess the image:
|
| 19 |
+
```python
|
| 20 |
+
import urllib
|
| 21 |
+
from PIL import Image
|
| 22 |
+
from timm.data import resolve_data_config
|
| 23 |
+
from timm.data.transforms_factory import create_transform
|
| 24 |
+
|
| 25 |
+
config = resolve_data_config({}, model=model)
|
| 26 |
+
transform = create_transform(**config)
|
| 27 |
+
|
| 28 |
+
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
|
| 29 |
+
urllib.request.urlretrieve(url, filename)
|
| 30 |
+
img = Image.open(filename).convert('RGB')
|
| 31 |
+
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
To get the model predictions:
|
| 35 |
+
```python
|
| 36 |
+
import torch
|
| 37 |
+
with torch.no_grad():
|
| 38 |
+
out = model(tensor)
|
| 39 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 40 |
+
print(probabilities.shape)
|
| 41 |
+
# prints: torch.Size([1000])
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
To get the top-5 predictions class names:
|
| 45 |
+
```python
|
| 46 |
+
# Get imagenet class mappings
|
| 47 |
+
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
|
| 48 |
+
urllib.request.urlretrieve(url, filename)
|
| 49 |
+
with open("imagenet_classes.txt", "r") as f:
|
| 50 |
+
categories = [s.strip() for s in f.readlines()]
|
| 51 |
+
|
| 52 |
+
# Print top categories per image
|
| 53 |
+
top5_prob, top5_catid = torch.topk(probabilities, 5)
|
| 54 |
+
for i in range(top5_prob.size(0)):
|
| 55 |
+
print(categories[top5_catid[i]], top5_prob[i].item())
|
| 56 |
+
# prints class names and probabilities like:
|
| 57 |
+
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Replace the model name with the variant you want to use, e.g. `ssl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page.
|
| 61 |
+
|
| 62 |
+
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
|
| 63 |
+
|
| 64 |
+
## How do I finetune this model?
|
| 65 |
+
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
|
| 66 |
+
```python
|
| 67 |
+
model = timm.create_model('ssl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
|
| 68 |
+
```
|
| 69 |
+
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
|
| 70 |
+
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
|
| 71 |
+
|
| 72 |
+
## How do I train this model?
|
| 73 |
+
|
| 74 |
+
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
|
| 75 |
+
|
| 76 |
+
## Citation
|
| 77 |
+
|
| 78 |
+
```BibTeX
|
| 79 |
+
@article{DBLP:journals/corr/abs-1905-00546,
|
| 80 |
+
author = {I. Zeki Yalniz and
|
| 81 |
+
Herv{\'{e}} J{\'{e}}gou and
|
| 82 |
+
Kan Chen and
|
| 83 |
+
Manohar Paluri and
|
| 84 |
+
Dhruv Mahajan},
|
| 85 |
+
title = {Billion-scale semi-supervised learning for image classification},
|
| 86 |
+
journal = {CoRR},
|
| 87 |
+
volume = {abs/1905.00546},
|
| 88 |
+
year = {2019},
|
| 89 |
+
url = {http://arxiv.org/abs/1905.00546},
|
| 90 |
+
archivePrefix = {arXiv},
|
| 91 |
+
eprint = {1905.00546},
|
| 92 |
+
timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
|
| 93 |
+
biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
|
| 94 |
+
bibsource = {dblp computer science bibliography, https://dblp.org}
|
| 95 |
+
}
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
<!--
|
| 99 |
+
Type: model-index
|
| 100 |
+
Collections:
|
| 101 |
+
- Name: SSL ResNext
|
| 102 |
+
Paper:
|
| 103 |
+
Title: Billion-scale semi-supervised learning for image classification
|
| 104 |
+
URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
|
| 105 |
+
Models:
|
| 106 |
+
- Name: ssl_resnext101_32x16d
|
| 107 |
+
In Collection: SSL ResNext
|
| 108 |
+
Metadata:
|
| 109 |
+
FLOPs: 46623691776
|
| 110 |
+
Parameters: 194030000
|
| 111 |
+
File Size: 777518664
|
| 112 |
+
Architecture:
|
| 113 |
+
- 1x1 Convolution
|
| 114 |
+
- Batch Normalization
|
| 115 |
+
- Convolution
|
| 116 |
+
- Global Average Pooling
|
| 117 |
+
- Grouped Convolution
|
| 118 |
+
- Max Pooling
|
| 119 |
+
- ReLU
|
| 120 |
+
- ResNeXt Block
|
| 121 |
+
- Residual Connection
|
| 122 |
+
- Softmax
|
| 123 |
+
Tasks:
|
| 124 |
+
- Image Classification
|
| 125 |
+
Training Techniques:
|
| 126 |
+
- SGD with Momentum
|
| 127 |
+
- Weight Decay
|
| 128 |
+
Training Data:
|
| 129 |
+
- ImageNet
|
| 130 |
+
- YFCC-100M
|
| 131 |
+
Training Resources: 64x GPUs
|
| 132 |
+
ID: ssl_resnext101_32x16d
|
| 133 |
+
LR: 0.0015
|
| 134 |
+
Epochs: 30
|
| 135 |
+
Layers: 101
|
| 136 |
+
Crop Pct: '0.875'
|
| 137 |
+
Batch Size: 1536
|
| 138 |
+
Image Size: '224'
|
| 139 |
+
Weight Decay: 0.0001
|
| 140 |
+
Interpolation: bilinear
|
| 141 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L944
|
| 142 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth
|
| 143 |
+
Results:
|
| 144 |
+
- Task: Image Classification
|
| 145 |
+
Dataset: ImageNet
|
| 146 |
+
Metrics:
|
| 147 |
+
Top 1 Accuracy: 81.84%
|
| 148 |
+
Top 5 Accuracy: 96.09%
|
| 149 |
+
- Name: ssl_resnext101_32x4d
|
| 150 |
+
In Collection: SSL ResNext
|
| 151 |
+
Metadata:
|
| 152 |
+
FLOPs: 10298145792
|
| 153 |
+
Parameters: 44180000
|
| 154 |
+
File Size: 177341913
|
| 155 |
+
Architecture:
|
| 156 |
+
- 1x1 Convolution
|
| 157 |
+
- Batch Normalization
|
| 158 |
+
- Convolution
|
| 159 |
+
- Global Average Pooling
|
| 160 |
+
- Grouped Convolution
|
| 161 |
+
- Max Pooling
|
| 162 |
+
- ReLU
|
| 163 |
+
- ResNeXt Block
|
| 164 |
+
- Residual Connection
|
| 165 |
+
- Softmax
|
| 166 |
+
Tasks:
|
| 167 |
+
- Image Classification
|
| 168 |
+
Training Techniques:
|
| 169 |
+
- SGD with Momentum
|
| 170 |
+
- Weight Decay
|
| 171 |
+
Training Data:
|
| 172 |
+
- ImageNet
|
| 173 |
+
- YFCC-100M
|
| 174 |
+
Training Resources: 64x GPUs
|
| 175 |
+
ID: ssl_resnext101_32x4d
|
| 176 |
+
LR: 0.0015
|
| 177 |
+
Epochs: 30
|
| 178 |
+
Layers: 101
|
| 179 |
+
Crop Pct: '0.875'
|
| 180 |
+
Batch Size: 1536
|
| 181 |
+
Image Size: '224'
|
| 182 |
+
Weight Decay: 0.0001
|
| 183 |
+
Interpolation: bilinear
|
| 184 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L924
|
| 185 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth
|
| 186 |
+
Results:
|
| 187 |
+
- Task: Image Classification
|
| 188 |
+
Dataset: ImageNet
|
| 189 |
+
Metrics:
|
| 190 |
+
Top 1 Accuracy: 80.91%
|
| 191 |
+
Top 5 Accuracy: 95.73%
|
| 192 |
+
- Name: ssl_resnext101_32x8d
|
| 193 |
+
In Collection: SSL ResNext
|
| 194 |
+
Metadata:
|
| 195 |
+
FLOPs: 21180417024
|
| 196 |
+
Parameters: 88790000
|
| 197 |
+
File Size: 356056638
|
| 198 |
+
Architecture:
|
| 199 |
+
- 1x1 Convolution
|
| 200 |
+
- Batch Normalization
|
| 201 |
+
- Convolution
|
| 202 |
+
- Global Average Pooling
|
| 203 |
+
- Grouped Convolution
|
| 204 |
+
- Max Pooling
|
| 205 |
+
- ReLU
|
| 206 |
+
- ResNeXt Block
|
| 207 |
+
- Residual Connection
|
| 208 |
+
- Softmax
|
| 209 |
+
Tasks:
|
| 210 |
+
- Image Classification
|
| 211 |
+
Training Techniques:
|
| 212 |
+
- SGD with Momentum
|
| 213 |
+
- Weight Decay
|
| 214 |
+
Training Data:
|
| 215 |
+
- ImageNet
|
| 216 |
+
- YFCC-100M
|
| 217 |
+
Training Resources: 64x GPUs
|
| 218 |
+
ID: ssl_resnext101_32x8d
|
| 219 |
+
LR: 0.0015
|
| 220 |
+
Epochs: 30
|
| 221 |
+
Layers: 101
|
| 222 |
+
Crop Pct: '0.875'
|
| 223 |
+
Batch Size: 1536
|
| 224 |
+
Image Size: '224'
|
| 225 |
+
Weight Decay: 0.0001
|
| 226 |
+
Interpolation: bilinear
|
| 227 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L934
|
| 228 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth
|
| 229 |
+
Results:
|
| 230 |
+
- Task: Image Classification
|
| 231 |
+
Dataset: ImageNet
|
| 232 |
+
Metrics:
|
| 233 |
+
Top 1 Accuracy: 81.61%
|
| 234 |
+
Top 5 Accuracy: 96.04%
|
| 235 |
+
- Name: ssl_resnext50_32x4d
|
| 236 |
+
In Collection: SSL ResNext
|
| 237 |
+
Metadata:
|
| 238 |
+
FLOPs: 5472648192
|
| 239 |
+
Parameters: 25030000
|
| 240 |
+
File Size: 100428550
|
| 241 |
+
Architecture:
|
| 242 |
+
- 1x1 Convolution
|
| 243 |
+
- Batch Normalization
|
| 244 |
+
- Convolution
|
| 245 |
+
- Global Average Pooling
|
| 246 |
+
- Grouped Convolution
|
| 247 |
+
- Max Pooling
|
| 248 |
+
- ReLU
|
| 249 |
+
- ResNeXt Block
|
| 250 |
+
- Residual Connection
|
| 251 |
+
- Softmax
|
| 252 |
+
Tasks:
|
| 253 |
+
- Image Classification
|
| 254 |
+
Training Techniques:
|
| 255 |
+
- SGD with Momentum
|
| 256 |
+
- Weight Decay
|
| 257 |
+
Training Data:
|
| 258 |
+
- ImageNet
|
| 259 |
+
- YFCC-100M
|
| 260 |
+
Training Resources: 64x GPUs
|
| 261 |
+
ID: ssl_resnext50_32x4d
|
| 262 |
+
LR: 0.0015
|
| 263 |
+
Epochs: 30
|
| 264 |
+
Layers: 50
|
| 265 |
+
Crop Pct: '0.875'
|
| 266 |
+
Batch Size: 1536
|
| 267 |
+
Image Size: '224'
|
| 268 |
+
Weight Decay: 0.0001
|
| 269 |
+
Interpolation: bilinear
|
| 270 |
+
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L914
|
| 271 |
+
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth
|
| 272 |
+
Results:
|
| 273 |
+
- Task: Image Classification
|
| 274 |
+
Dataset: ImageNet
|
| 275 |
+
Metrics:
|
| 276 |
+
Top 1 Accuracy: 80.3%
|
| 277 |
+
Top 5 Accuracy: 95.41%
|
| 278 |
+
-->
|